Unnamed: 0
int64 0
15.9k
| cleaned_code
stringlengths 67
124k
⌀ | cleaned_prompt
stringlengths 168
30.3k
⌀ |
|---|---|---|
7,600
|
<ASSISTANT_TASK:>
Python Code:
!pip install nnabla-ext-cuda100
!git clone https://github.com/sony/nnabla-examples.git
%cd nnabla-examples
import numpy as np
import nnabla as nn
import nnabla.logger as logger
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import nnabla.utils.save as save
import os
from utils.neu.checkpoint_util import save_checkpoint, load_checkpoint
from utils.neu.save_nnp import save_nnp
import struct
import zlib
from nnabla.logger import logger
from nnabla.utils.data_iterator import data_iterator
from nnabla.utils.data_source import DataSource
from nnabla.utils.data_source_loader import download
def load_mnist(train=True):
    '''
    Fetch the MNIST images and labels, either from Yann LeCun's original
    page or from the local download cache.

    Args:
        train (bool): When False the 10000-image testing split is returned
            instead of the 60000-image training split.

    Returns:
        numpy.ndarray: Images with shape (#images, 1, 28, 28), uint8 values.
        numpy.ndarray: Labels with shape (#images, 1). Values in {0, ..., 9}.
    '''
    if train:
        image_uri = 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz'
        label_uri = 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz'
    else:
        image_uri = 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz'
        label_uri = 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz'
    # Labels first. MAX_WBITS | 32 lets zlib auto-detect the gzip header;
    # the IDX label file starts with an 8-byte header (magic, count).
    logger.info('Getting label data from {}.'.format(label_uri))
    response = download(label_uri)
    raw = zlib.decompress(response.read(), zlib.MAX_WBITS | 32)
    _, size = struct.unpack('>II', raw[0:8])
    labels = np.frombuffer(raw[8:], np.uint8).reshape(-1, 1)
    response.close()
    logger.info('Getting label data done.')
    # Images next. The IDX image header is 16 bytes:
    # (magic, count, height, width), all big-endian uint32.
    logger.info('Getting image data from {}.'.format(image_uri))
    response = download(image_uri)
    raw = zlib.decompress(response.read(), zlib.MAX_WBITS | 32)
    _, size, height, width = struct.unpack('>IIII', raw[0:16])
    images = np.frombuffer(raw[16:], np.uint8).reshape(
        size, 1, height, width)
    response.close()
    logger.info('Getting image data done.')
    return images, labels
class MnistDataSource(DataSource):
    '''
    DataSource serving the MNIST dataset downloaded directly from the
    Internet (yann.lecun.com) or read from the local cache.
    '''

    def __init__(self, train=True, shuffle=False, rng=None):
        '''
        Args:
            train (bool): Serve the training split when True, the testing
                split otherwise.
            shuffle (bool): Randomize the sample order on every reset.
            rng (numpy.random.RandomState): Randomness source for the
                shuffle; a fixed-seed RandomState is created when None so
                runs are reproducible by default.
        '''
        super(MnistDataSource, self).__init__(shuffle=shuffle)
        self._train = train
        self._images, self._labels = load_mnist(train)
        self._size = self._labels.size
        self._variables = ('x', 'y')
        if rng is None:
            rng = np.random.RandomState(313)
        self.rng = rng
        self.reset()

    def _get_data(self, position):
        # Map the iterator position through the (possibly shuffled) index
        # table so shuffling never touches the underlying arrays.
        image = self._images[self._indexes[position]]
        label = self._labels[self._indexes[position]]
        return (image, label)

    def reset(self):
        # Rebuild the sample order: a fresh permutation when shuffling is
        # enabled, the identity order otherwise.
        if self._shuffle:
            self._indexes = self.rng.permutation(self._size)
        else:
            self._indexes = np.arange(self._size)
        super(MnistDataSource, self).reset()

    @property
    def images(self):
        '''Get copy of whole data with a shape of (N, 1, H, W).'''
        return self._images.copy()

    @property
    def labels(self):
        '''Get copy of whole label with a shape of (N, 1).'''
        return self._labels.copy()
def data_iterator_mnist(batch_size,
                        train=True,
                        rng=None,
                        shuffle=True,
                        with_memory_cache=False,
                        with_file_cache=False):
    '''
    Wrap a :py:class:`MnistDataSource` in a ``DataIterator``.

    Both cache options default to False because
    :py:class:`MnistDataSource` already keeps the entire dataset in
    memory.

    For example,

    .. code-block:: python

        with data_iterator_mnist(batch_size, True) as di:
            for data in di:
                SOME CODE TO USE data.
    '''
    source = MnistDataSource(train=train, shuffle=shuffle, rng=rng)
    return data_iterator(source,
                         batch_size,
                         rng,
                         with_memory_cache,
                         with_file_cache)
def generator(z, maxh=256, test=False, output_hidden=False):
    '''
    Build the generator network which takes (B, Z, 1, 1) inputs and
    generates (B, 1, 28, 28) outputs in [-1, 1] (tanh).

    Args:
        z (nn.Variable): Latent input of shape (B, Z, 1, 1).
        maxh (int): Channel count of the widest (first) layer; later
            layers use maxh // 2, maxh // 4 and maxh // 8 channels.
        test (bool): Use batch-norm inference statistics when True.
        output_hidden (bool): Also return the hidden activations.
    '''
    # Define shortcut functions
    def bn(x):
        # Batch normalization
        return PF.batch_normalization(x, batch_stat=not test)

    def upsample2(x, c):
        # Twice upsampling with deconvolution.
        return PF.deconvolution(x, c, kernel=(4, 4), pad=(1, 1), stride=(2, 2), with_bias=False)

    # Use integer division for channel counts: under Python 3 a plain `/`
    # yields floats, which the deconvolution outmaps argument rejects.
    # The smallest derived count is maxh // 8, so guard on that.
    assert maxh // 8 > 0
    with nn.parameter_scope("gen"):
        # (Z, 1, 1) --> (256, 4, 4)
        with nn.parameter_scope("deconv1"):
            d1 = F.elu(bn(PF.deconvolution(z, maxh, (4, 4), with_bias=False)))
        # (256, 4, 4) --> (128, 8, 8)
        with nn.parameter_scope("deconv2"):
            d2 = F.elu(bn(upsample2(d1, maxh // 2)))
        # (128, 8, 8) --> (64, 16, 16)
        with nn.parameter_scope("deconv3"):
            d3 = F.elu(bn(upsample2(d2, maxh // 4)))
        # (64, 16, 16) --> (32, 28, 28)
        with nn.parameter_scope("deconv4"):
            # Convolution with kernel=4, pad=3 and stride=2 transforms a 28 x 28 map
            # to a 16 x 16 map. Deconvolution with those parameters behaves like an
            # inverse operation, i.e. maps 16 x 16 to 28 x 28.
            d4 = F.elu(bn(PF.deconvolution(
                d3, maxh // 8, (4, 4), pad=(3, 3), stride=(2, 2), with_bias=False)))
        # (32, 28, 28) --> (1, 28, 28)
        with nn.parameter_scope("conv5"):
            x = F.tanh(PF.convolution(d4, 1, (3, 3), pad=(1, 1)))
    if output_hidden:
        return x, [d1, d2, d3, d4]
    return x
def discriminator(x, maxh=256, test=False, output_hidden=False):
    '''
    Build the discriminator network which maps a (B, 1, 28, 28) input to
    a (B, 1) logit.

    Args:
        x (nn.Variable): Image input of shape (B, 1, 28, 28).
        maxh (int): Channel count of the widest (last conv) layer; earlier
            layers use maxh // 8, maxh // 4 and maxh // 2 channels.
        test (bool): Use batch-norm inference statistics when True.
        output_hidden (bool): Also return the hidden activations.
    '''
    # Define shortcut functions
    def bn(xx):
        # Batch normalization
        return PF.batch_normalization(xx, batch_stat=not test)

    def downsample2(xx, c):
        # Halve the spatial resolution with a strided convolution.
        return PF.convolution(xx, c, (3, 3), pad=(1, 1), stride=(2, 2), with_bias=False)

    # Integer division keeps the channel counts ints under Python 3;
    # plain `/` would pass floats to the convolution outmaps argument.
    assert maxh // 8 > 0
    with nn.parameter_scope("dis"):
        # (1, 28, 28) --> (32, 16, 16)
        with nn.parameter_scope("conv1"):
            c1 = F.elu(bn(PF.convolution(x, maxh // 8,
                                         (3, 3), pad=(3, 3), stride=(2, 2), with_bias=False)))
        # (32, 16, 16) --> (64, 8, 8)
        with nn.parameter_scope("conv2"):
            c2 = F.elu(bn(downsample2(c1, maxh // 4)))
        # (64, 8, 8) --> (128, 4, 4)
        with nn.parameter_scope("conv3"):
            c3 = F.elu(bn(downsample2(c2, maxh // 2)))
        # (128, 4, 4) --> (256, 4, 4)
        with nn.parameter_scope("conv4"):
            c4 = bn(PF.convolution(c3, maxh, (3, 3),
                                   pad=(1, 1), with_bias=False))
        # (256, 4, 4) --> (1,)
        with nn.parameter_scope("fc1"):
            f = PF.affine(c4, 1)
    if output_hidden:
        return f, [c1, c2, c3, c4]
    return f
# Get context: run the whole graph on the GPU through the cuDNN backend.
from nnabla.ext_utils import get_extension_context
ctx = get_extension_context('cudnn')
nn.set_default_context(ctx)

# Create CNN network for both training and testing.
# TRAIN

# Hyperparameters (Adam settings follow the DCGAN paper).
batch_size = 64
learning_rate = 0.0002
max_iter = 20000
weight_decay = 0.0001

# Fake path: z -> generator -> discriminator, trained against a "real" (1)
# target so that minimizing loss_gen pushes the generator to fool the
# discriminator.
z = nn.Variable([batch_size, 100, 1, 1])
fake = generator(z)
fake.persistent = True  # Not to clear at backward
pred_fake = discriminator(fake)
loss_gen = F.mean(F.sigmoid_cross_entropy(
    pred_fake, F.constant(1, pred_fake.shape)))
# Unlinked copy of the fake images: gradients from the discriminator loss
# must not flow back into the generator parameters.
fake_dis = fake.get_unlinked_variable(need_grad=True)
pred_fake_dis = discriminator(fake_dis)
loss_dis = F.mean(F.sigmoid_cross_entropy(
    pred_fake_dis, F.constant(0, pred_fake_dis.shape)))

# Real path: real images should be classified as real (1); the
# discriminator loss accounts for both the fake and real halves.
x = nn.Variable([batch_size, 1, 28, 28])
pred_real = discriminator(x)
loss_dis += F.mean(F.sigmoid_cross_entropy(pred_real,
                                           F.constant(1, pred_real.shape)))

# Create one solver per sub-network, each owning only its own parameter
# scope ("gen" / "dis").
solver_gen = S.Adam(learning_rate, beta1=0.5)
solver_dis = S.Adam(learning_rate, beta1=0.5)
with nn.parameter_scope("gen"):
    solver_gen.set_parameters(nn.get_parameters())
with nn.parameter_scope("dis"):
    solver_dis.set_parameters(nn.get_parameters())

start_point = 0
# If necessary, load weights and solver state info from specified checkpoint files.
# start_point = load_checkpoint(
#     specified_checkpoint, {"gen": solver_gen, "dis": solver_dis})

# Create monitors: scalar losses, wall-clock time, and tiled fake-image
# snapshots (rescaled from tanh's [-1, 1] to [0, 1] for display).
import nnabla.monitor as M
monitor_path = 'tmp.monitor.dcgan'
monitor = M.Monitor(monitor_path)
monitor_loss_gen = M.MonitorSeries("Generator loss", monitor, interval=10)
monitor_loss_dis = M.MonitorSeries(
    "Discriminator loss", monitor, interval=10)
monitor_time = M.MonitorTimeElapsed("Time", monitor, interval=100)
monitor_fake = M.MonitorImageTile(
    "Fake images", monitor, normalize_method=lambda x: (x + 1) / 2.)

data = data_iterator_mnist(batch_size, True)

# Training loop: alternate one generator step and one discriminator step
# per iteration.
for i in range(start_point, max_iter):
    # Training forward
    # NOTE(review): this maps [0, 255] to [-0.5, 0.5], while the generator's
    # tanh output lives in [-1, 1] -- confirm the intended input range.
    image, _ = data.next()
    x.d = image / 255. - 0.5
    z.d = np.random.randn(*z.shape)

    # Generator update.
    solver_gen.zero_grad()
    loss_gen.forward(clear_no_need_grad=True)
    loss_gen.backward(clear_buffer=True)
    solver_gen.weight_decay(weight_decay)
    solver_gen.update()
    monitor_fake.add(i, fake)
    monitor_loss_gen.add(i, loss_gen.d.copy())

    # Discriminator update.
    solver_dis.zero_grad()
    loss_dis.forward(clear_no_need_grad=True)
    loss_dis.backward(clear_buffer=True)
    solver_dis.weight_decay(weight_decay)
    solver_dis.update()
    monitor_loss_dis.add(i, loss_dis.d.copy())
    monitor_time.add(i)

# Show how the generated samples evolved: display one saved image tile per
# 1000 iterations.
from IPython.display import Image, display
for i in range(20):
    print("At iteration",(i+1)*1000-1)
    display(Image('tmp.monitor.dcgan/Fake-images/{:06d}.png'.format((i+1)*1000-1)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's start by importing dependencies.
Step2: Now let's define a function to download and load MNIST. This function will pass image-label pairs to DataSource class, which we will define next.
Step5: Now we define a data iterator to pass the images and labels to actual computation graphs.
Step7: Generative adversarial networks and their variations, including DCGAN, have an adversarial setting, in which a generator network and a discriminator network compete against each other. Let's define our generator network first. We implement the network with 4 consecutive deconvolution layers, each of which is followed by batch normalization and an ELU non-linear activation. Then, we apply a convolutional layer followed by a hyperbolic tangent.
Step9: We then define the other part of the adversarial setting, the discriminator network. We can think of the discriminator as the reverse of the generator network, where we have 4 consecutive convolutional layers instead of deconvolution layers. All convolutional layers are again followed by batch normalization and an ELU activation, except for the last convolutional layer. Finally, we apply an affine layer.
Step10: Now we are ready to get into the training part. Let's first define the context to use GPU and define hyperparameters. We also need to define the noise variable z, which is fed into the generator network to generate fake images, which in turn will be fed to the discriminator network. We define separate losses for generator and discriminator networks, both with sigmoid cross entropy.
Step11: Likewise, let's define a variable for real images, also to be input to discriminator network. Note that discriminator loss will have to account for both fake and real images.
Step12: We also define separate solvers for generator and discriminator. Following the paper, we used Adam for both. Let's also define monitor variables to keep track of the progress. This will also save some of the generated fake images.
Step13: We are now ready to go! We call the data iterator that we defined earlier, and define training loop, in which we alternate between generator update and discriminator update.
Step14: Now that we're done training, let's see how the generated fake images evolved throughout the training!
|
7,601
|
<ASSISTANT_TASK:>
Python Code:
# Build and simulate a sparsely connected excitatory/inhibitory network of
# iaf_psc_delta neurons driven by Poisson input, then report the mean
# firing rate and raster plots.
# populate namespace with pylab functions and stuff
%pylab inline

# import NEST & NEST rasterplot
import nest
import nest.raster_plot

# simulation parameters
simtime = 1000.  # simulation time (ms)
dt = 0.1  # simulation resolution (ms)

# network parameters
gamma = 0.25  # relative number of inhibitory connections
NE = 5000  # number of excitatory neurons (10.000 in [1])
NI = int(gamma * NE)  # number of inhibitory neurons
N_rec = 50  # neurons recorded per population (50 e + 50 i = 100 total)
CE = 1000  # indegree from excitatory neurons
CI = int(gamma * CE)  # indegree from inhibitory neurons

# synapse parameters
w = 0.1  # excitatory synaptic weight (mV)
g = 5.  # relative inhibitory to excitatory synaptic weight
d = 1.5  # synaptic transmission delay (ms)

# neuron parameters
V_th = 20.  # spike threshold (mV)
tau_m = 20.  # membrane time constant (ms)
neuron_params = {
    'C_m': 1.0,  # membrane capacity (pF)
    'E_L': 0.,  # resting membrane potential (mV)
    'I_e': 0.,  # external input current (pA)
    'V_m': 0.,  # initial membrane potential (mV)
    'V_reset': 10.,  # reset membrane potential after a spike (mV)
    'V_th': V_th,  # spike threshold (mV), defined above
    't_ref': 2.0,  # refractory period (ms)
    'tau_m': tau_m,  # membrane time constant (ms), defined above
}

# external input parameters
nu_th = V_th / (w * tau_m)  # external rate needed to evoke activity (spikes/ms)
nu_ex = 2.0 * nu_th  # set external rate above threshold
p_rate = 1e3 * nu_ex  # external rate (spikes/s)

# configure kernel
nest.ResetKernel()
nest.SetKernelStatus({
    'resolution': dt,  # set simulation resolution
    'print_time': True,  # enable printing of simulation progress (-> terminal)
    'local_num_threads': 2  # use two threads to build & simulate the network
})

# set default parameters for neurons and create neurons
nest.SetDefaults('iaf_psc_delta', neuron_params)
neurons_e = nest.Create('iaf_psc_delta', NE)
neurons_i = nest.Create('iaf_psc_delta', NI)

# create poisson generator and set 'rate' to p_rate
pgen = nest.Create('poisson_generator', params={'rate': p_rate})

# create spike detectors, one per population, recording to memory only
spikes_e = nest.Create('spike_detector')
spikes_i = nest.Create('spike_detector')
nest.SetStatus(spikes_e, [{'withtime': True,
                           'withgid': True,
                           'to_file': False}])
nest.SetStatus(spikes_i, [{'withtime': True,
                           'withgid': True,
                           'to_file': False}])

# create excitatory connections
# synapse specification
syn_exc = {'delay': d, 'weight': w}
# connection specification: every target receives exactly CE excitatory inputs
conn_exc = {'rule': 'fixed_indegree', 'indegree': CE}
# connect stuff
nest.Connect(neurons_e, neurons_e, conn_exc, syn_exc)
nest.Connect(neurons_e, neurons_i, conn_exc, syn_exc)

# create inhibitory connections
# synapse specification: inhibitory weight is -g times the excitatory weight
syn_inh = {'delay': d, 'weight': - g * w}
# connection specification
conn_inh = {'rule': 'fixed_indegree', 'indegree': CI}
# connect stuff
nest.Connect(neurons_i, neurons_e, conn_inh, syn_inh)
nest.Connect(neurons_i, neurons_i, conn_inh, syn_inh)

# connect poisson generator using the excitatory connection weight
nest.Connect(pgen, neurons_i, syn_spec=syn_exc)
nest.Connect(pgen, neurons_e, syn_spec=syn_exc)

# connect N_rec excitatory / inhibitory neurons to spike detector
nest.Connect(neurons_e[:N_rec], spikes_e)
nest.Connect(neurons_i[:N_rec], spikes_i)

# simulate
nest.Simulate(simtime)

# calculate mean firing rate in spikes per second
# (events / simulated seconds / recorded neurons, per population)
events_ex = nest.GetStatus(spikes_e, 'n_events')[0]
events_in = nest.GetStatus(spikes_i, 'n_events')[0]
rate_ex = events_ex / simtime / N_rec * 1e3
rate_in = events_in / simtime / N_rec * 1e3
mean_rate = (rate_ex + rate_in) / 2.
print('Mean firing rate: {} Hz'.format(mean_rate))

# raster plot of spiking activity using nest.raster_plot
nest.raster_plot.from_device(spikes_e, hist=True)
nest.raster_plot.from_device(spikes_i, hist=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Configure NEST
Step3: Create neurons and devices
Step4: Create connections
Step5: Simulate and analyse
|
7,602
|
<ASSISTANT_TASK:>
Python Code:
# Example 1: horizontal bar chart with default styling.
data = dict(
    labels=['Bananas','Apples','Oranges','Watermelons','Grapes','Kiwis'],
    values=[4000,8000,3000,1600,1000,2500]
)
out = StdCharts.HBar(data)
# HBar appears to return an HTML snippet renderable inline in the notebook.
HTML(out)

# Persist the chart to disk as an image file.
StdCharts.save(out,'report_chart.png')

# Example 2: horizontal bar chart with styling options customized.
data = dict(
    labels=['Bananas','Apples','Oranges','Watermelons','Grapes','Kiwis'],
    values=[4000,8000,3000,1600,1000,2500]
)
out = StdCharts.HBar(
    data = data,
    width=600,
    color='#996666',  # presumably the text/label color -- confirm in StdCharts docs
    title='Fruit prices ($)',
    source='Source: Local Market',  # attribution line shown with the chart
    fill='rgb(220, 75, 30)',  # presumably the bar fill color -- confirm
    values_sorted=True,  # sort bars by value instead of input order
    paper='#f6f6f6',  # presumably the background color -- confirm
    locale='en',
    font='Tahoma')
HTML(out)

# Example 3: vertical bar chart of a yearly series.
data = dict(
    labels=[2010,2011,2012,2013,2014,2015,2016,2017],
    values=[4000,8000,3000,1600,1000,2500,4300,4200]
)
out = StdCharts.VBar(data=data,paper='#f3f3f3')
HTML(out)
StdCharts.save(out,'report_vertical.png')

# Example 4: vertical bar chart, sorted and recolored.
data = dict(
    labels=['City ABCD','City EFGHM','City OPQRSTUV'],
    values=[200,100,500]
)
out = StdCharts.VBar(data=data,values_sorted=True,color="#446699",fill="#999999")
HTML(out)
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can also save your chart with the save method
Step2: Example 2
Step3: Vertical Bar Charts
Step4: Example 4
|
7,603
|
<ASSISTANT_TASK:>
Python Code:
def sumar(x, y):  # Define the sumar (add) function
    '''Return x + y; works for any operand types that support +.'''
    return x + y

x = 4
z = 5
print sumar(x, z)  # Call sumar with the arguments x and z
print sumar(1, 2)  # Call sumar with the arguments 1 and 2
# No types are declared, so + also concatenates strings.
print sumar('hola ', 'mundo')
def sumar(x, y):
    '''
    Sum two elements and return the result.
    '''
    # The docstring above had lost its triple quotes in the source dump,
    # which made the function body a SyntaxError; restored here so that
    # help(sumar) shows it as intended.
    return x + y

help(sumar)
def factorial(n):
    '''
    Compute the factorial of a non-negative integer iteratively.
    '''
    # range(1, n) is empty for n <= 1, so 0! would otherwise fall through
    # and wrongly return 0; by definition 0! == 1.
    if n == 0:
        return 1
    # Accumulate n * (n-1) * ... * 1 in place.
    for i in range(1, n):
        n *= i
    return n

fact_5 = factorial(5)  # compute 5! and keep the result in fact_5
factorial(10)  # compute 10! without storing the result anywhere
# A function without an explicit return: Python implicitly returns None.
def imprimir(msg):
    # Python 2 print statement (this notebook targets Python 2).
    print msg

imprimir('Hola mundo')
# Capturing the result of this "procedure" shows it returned None.
resultado = imprimir('Hola mundo')
print resultado
# Functions are first-class objects with their own type...
print type(imprimir)
print type(sumar)
print sumar
# ...so they can be bound to another name and called through it.
mi_suma = sumar
print mi_suma(1, 2)
def sumar(*args):
    '''Return the sum of every positional argument (0 when none given).'''
    total = 0
    for value in args:
        total = total + value
    return total
print sumar(1, 2)
print sumar(1, 2, 3, 4, 5)
# The * operator unpacks a list into positional arguments.
print sumar(*[1, 2, 3, 4, 5, 6])
print sumar

# Function with two required parameters and three defaulted ones; callers
# may pass 2 to 5 arguments, positionally or by name.
def imprimir_parametros(param1, param2, param3=5, param4="es el cuarto parametro", param5=False):
    # Python 2 print statement.
    print param1, param2, param3, param4, param5

imprimir_parametros(1, 2, 3, 4, 5)
imprimir_parametros(1, 2, 3, 4)  # param5 falls back to its default
imprimir_parametros(1, 2, 3)
imprimir_parametros(1, 2)
imprimir_parametros(1)  # TypeError: param2 has no default
# Keyword arguments let later parameters be set while skipping earlier ones.
imprimir_parametros(1, 2, param5="Este el parametro5")
imprimir_parametros(1, 2, param4=4)
# All parameters may be named, in any order.
imprimir_parametros(param5=1, param3=2, param1=3, param2=4, param4=5)
parametros = {
    'param1': 1,
    'param2': 2,
    'param3': 3,
    'param4': 4,
    'param5': 5,
}
# The ** operator unpacks a dict into keyword arguments.
imprimir_parametros(**parametros)

# Importing a whole module versus importing selected names from it.
import datetime
print datetime.date.today()
from datetime import date, time
print date.today()
print time(1, 23, 32)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Aunque en ningún momento indicamos que lo que tiene que sumar son números, por lo que también puede sumar strings
Step3: Además, a esta función le podría agregar comentarios (docstrings) para que al hacer help de la función se entienda qué es lo que hace
Step5: El resultado de la función no es necesario que lo guarde en una variable, tranquilamente la puedo invocar y perder ese valor.
Step6: ¿Y qué sucede si no pongo el return en una función?
Step7: ¿Y si le asigno el resultado de este procedimiento a una variable?
Step8: Por lo que no existen los procedimientos, los "procedimientos" en realidad son funciones que devuelven None.
Step9: Ahora, si la función es un tipo de dato, significa que se lo puedo asignar a una variable...
Step10: ¿Y qué pasa si ahora llamo a mi_suma con los parámetros 1 y 2 como hice antes con sumar?
Step11: Lista de parámetros
Step12: Parámetros por defecto
Step13: Para esta función nos pueden pasar 2, 3, 4 o 5 parámetros. Si nos pasan los 5 parámetros, se imprimirán los valores que nos pasen
Step14: Ahora, si nos pasan 4 parámetros, el intérprete asumirá que el faltante es param5, por lo que dicho parámetro tomará el valor False. Y lo mismo pasa con el resto de los parámetros.
Step15: ¿Y si le pasamos un sólo parámetro?.
Step16: ¿Y qué pasa si quiero pasarle los parámetros 1, 2 y el 5?. <br>
Step17: Lo mismo pasa si lo que quiero cambiar es el cuatro parámetro
Step18: Hasta se pueden nombrar todos los parámetros
Step19: Si bien puede parecer innecesario el uso de parámetros nombrados, en algunas oportunidades se suele usar para agregar claridad y legibilidad al código, y en otros para pasarle un diccionario
Step20: Uso de módulos externos
Step21: Pero a diferencia de Pascal y C, acá podemos elegir importar una función o algo en particular de ese módulo, en lugar de traerlo todo. Para eso tendríamos que poner en primer lugar la cláusula from, luego el nombre del módulo y a continuación la cláusula import todo lo que queremos importar separada por comas. <br>
|
7,604
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'hadgem3-gc31-ll', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
7,605
|
<ASSISTANT_TASK:>
Python Code:
# Python 2 and 3 compatibility
# pip install future
from __future__ import (absolute_import, division,
print_function, unicode_literals)
# отключим предупреждения Anaconda
import warnings
warnings.simplefilter('ignore')
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
salaries = pd.Series([400, 300, 200, 250],
index = ['Andrew', 'Bob',
'Charles', 'Ann'])
print(salaries)
salaries[salaries > 250]
print(salaries.Andrew == salaries['Andrew'])
salaries['Carl'] = np.nan
salaries.fillna(salaries.median(), inplace=True)
salaries
salaries.c = np.nan # Series can contain missing values
print(salaries)
print('Second element of salaries is', salaries[1], '\n')
# Smart indexing
print(salaries[:3], '\n')
print('There are', len(salaries[salaries > 0]), 'positive elements in salaries\n')
# Series obects can be the arguments for Numpy functions
print(np.exp(salaries))
df1 = pd.DataFrame(np.random.randn(5, 3),
index=['o1', 'o2', 'o3', 'o4', 'o5'],
columns=['f1', 'f2', 'f3'])
df1
df2 = pd.DataFrame({'A': np.random.random(5),
'B': ['a', 'b', 'c', 'd', 'e'],
'C': np.arange(5) > 2})
df2
print('The element in position 3, B is', df2.at[3, 'B'], '\n')
print(df2.loc[[1, 4], ['A', 'B']])
df2.at[2, 'B'] = 'f'
df2
df2.loc[5] = [3.1415, 'c', False]
df2
df1.columns = ['A', 'B', 'C']
df3 = df1.append(df2)
df3
df1.at['o2', 'A'] = np.nan
df1.at['o4', 'C'] = np.nan
df1
pd.isnull(df1)
df1.dropna(how='any')
df1.fillna(0)
df = pd.read_csv('../data/beauty.csv', sep = ';')
print(df.shape)
print(df.columns.values)
df.head(10)
df.head(4)
df.describe()
df.sort_values(by='wage', ascending = False).head()
df.sort_values(by=['female', 'wage'],
ascending=[True, False]).head()
df['goodhlth'].mean()
df[df['female'] == 1].head()
df[(df['goodhlth'] == 1) &
(df['female'] == 1)].head()
df[(df['female'] == 0)]['wage'].mean() - \
df[(df['female'] == 1)]['wage'].mean()
df[(df['female'] == 0) & (df['union'] == 1)
& (df['exper'] < 10)]['wage'].max()
df.apply(np.mean)
df['looks'].describe()
g = df.groupby('looks')
for (i, sub_df) in g:
print(sub_df['wage'].mean(), sub_df['looks'].mean())
d1 = g.get_group(1)
d1
pd.scatter_matrix(df[['wage', 'exper', 'educ', 'looks']],
figsize=(15, 15), diagonal='kde')
plt.show()
df['looks'].hist()
df.hist(color = 'k', bins = 30, figsize=(15,10))
plt.show()
df.boxplot(column='exper', by='looks')
plt.show()
df.boxplot(column='exper', by=['female', 'black'],
figsize=(10,10))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Основными структурами данных в Pandas являются классы Series и DataFrame. Первый из них представляет собой одномерный индексированный массив данных некоторого фиксированного типа. Второй - это двухмерная структура данных, представляющая собой таблицу, каждый столбец которой содержит данные одного типа. Можно представлять её как словарь объектов типа Series. Структура DataFrame отлично подходит для представления реальных данных
Step2: Индексирование возможно в виде s.Name или s['Name'].
Step3: Series поддерживает пропуски в данных.
Step4: Объекты Series похожи на ndarray и могут быть переданы в качестве аргументов большинству функций из Numpy.
Step5: DataFrame
Step6: Альтернативным способом является создание DataFrame из словаря numpy массивов или списков.
Step7: Обращение к элементам (или целым кускам фрейма)
Step8: Изменение элементов и добавление новых
Step9: Обработка пропущенных значений
Step10: Булева маска для пропущенных значений (True - там, где был пропуск, иначе - False)
Step11: Можно удалить все строки, где есть хотя бы один пропуск.
Step12: Пропуски можно заменить каким-то значением.
Step13: Пример первичного анализа данных с Pandas
Step14: Посмотрим на размер данных и названия признаков.
Step15: При работе с большими объёмами данных бывает удобно посмотреть только на небольшие части фрейма (например, начало).
Step16: Метод describe показывает основные статистические характеристики данных по каждому признаку
Step17: DataFrame можно отсортировать по значению какого-нибудь из признаков. В нашем случае, например, по размеру заработной платы.
Step18: Индексация и извлечение данных
Step19: Очень удобной является логическая индексация DataFrame по одному столбцу. Выглядит она следующим образом
Step20: Какова максимальная заработная плата среди мужчин, имеющих членство в профсоюзе, и с опытом работы до 10 лет?
Step21: Применение функции к каждому столбцу
Step22: Группирование данных в зависимости от значения признака looks и подсчет среднего значения по каждому столбцу в каждой группе.
Step23: Обращение к конкретной группе
Step24: Визуализация в Pandas
Step25: Для каждого признака можно построить отдельную гистограмму
Step26: Или сразу для всех
Step27: Полезным также является график типа box plot ("ящик с усами"). Он позволяет компактно визуализировать основные характеристики (медиану, нижний и верхний квартили, минимальное и максимальное значение, выбросы) распределения признаков.
Step28: Можно сделать это, сгруппировав данные по какому-либо другому признаку
|
7,606
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from sklearn.decomposition import PCA
import pandas as pd
df = pd.read_csv('Manhattan.txt', sep='\s+')
df.drop('id', axis=1, inplace=True)
df.tail()
tdf = df.iloc[:, 0:-3]
tdf.tail()
pca = PCA(n_components=8)
pca.fit(tdf)
np.set_printoptions(precision=6, suppress=True)
print('各主成份方差贡献占比:', end=' ')
print(pca.explained_variance_ratio_)
emotion_score = pd.DataFrame(pca.transform(tdf))
emotion_score.rename(columns={'0': 'emotion_score'}, inplace=True)
# 第一个主成份
pd.concat([df, emotion_score.loc[:, 0]], axis=1, join='inner').rename(index=str, columns={0: 'emotion_score'}).to_csv('Manhattan_score_raw.txt', index=None, sep='\t')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: how to index a given part of a DataFrame have been a problem for me.
Step2: 取一个主成分, 解释方差0.917864
|
7,607
|
<ASSISTANT_TASK:>
Python Code:
# TensorBoard Helper Functions and Constants
# Directory to export TensorBoard summary statistics, graph data, etc.
TB_DIR = '/tmp/tensorboard/tf_basics'
def _start_tb(d):
Private function that calls `tensorboard` shell command
args:
d: The desired directory to launch in TensorBoard
!tensorboard --port=6006 --logdir=$d
def start_tensorboard(d=TB_DIR):
Starts TensorBoard from the notebook in a separate thread.
Prevents Jupyter Notebook from halting while TensorBoard runs.
import threading
threading.Thread(target=_start_tb, args=(TB_DIR,)).start()
del threading
def stop_tensorboard():
Kills all TensorBoard processes
!ps -aef | grep "tensorboard" | tr -s ' ' | cut -d ' ' -f2 | xargs kill -KILL
def reset_tensorboard():
stop_tensorboard()
start_tensorboard()
# Import core TensorFlow libraries
import tensorflow as tf
import numpy as np
# `tf.placeholder` creates an "input" node- we will give it value when we run our model
a = tf.placeholder(tf.int32, name="input_a")
b = tf.placeholder(tf.int32, name="input_b")
# `tf.add` creates an addition node
c = tf.add(a, b, name="add")
# `tf.mul` creates a multiplication node
d = tf.mul(a, b, name="multiply")
# Add up the results of the previous two nodes
out = tf.add(c, d, name="output")
# OPTIONAL
# Create a scalar summary, which will log the value we tell it to when executed
# In this case, we'll tell it to save our output value from `out`
# This works in tandem with our SummaryWriter below
# To create the summary, we pass in two parameters:
# 1. A 'tag', which gives a label to the data
# 2. The value(s) we'd like to save
# We also give a `name` to the summary itself (does not affect behavior)
out_summary = tf.scalar_summary("output", out, name="output_summary")
# Start a session
sess = tf.Session()
# Create a "feed_dict" dictionary to define input values
# Keys to dictionary are handles to our placeholders
# Values to dictionary are values we'd like to feed in
feed_dict = { a: 4, b: 3 }
# OPTIONAL
# Opens a `SummaryWriter` object, which can write stats about the graph to disk
# We pass in two parameters into the SummaryWriter constructor
# The first is a string, specifies a directory to write to.
# (Note: `TB_DIR` was specified earlier. "TB" stands for TensorBoard
# The second parameter passes in our graph. This allows us to visualize our graph later
writer = tf.train.SummaryWriter(TB_DIR, graph=sess.graph)
# Execute the graph using `sess.run()`, passing in two parameters:
# The first parameter, `fetches` lists which node(s) we'd like to receive as output
# The second parameter, `feed_dict`, feeds in key-value pairs
# to input or override the value of nodes
# In this case, we run both the output value, as well as its scalar summary
result, summary = sess.run([out, out_summary], feed_dict=feed_dict)
# Print output with fun formatting
print("(({0}*{1}) + ({0}+{1})) = ".format(feed_dict[a], feed_dict[b]) + str(result))
# We add the summary to our SummaryWriter, which will write them to disk:
# Normally, these summaries are used to generate statistics over time
# TensorBoard doesn't do well visualizing single points, so we fake a "global_step"
# With two points, it will generate a line
writer.add_summary(summary, global_step=0)
writer.add_summary(summary, global_step=100)
# Use SummaryWriter.flush() to write all previously added summaries to disk
# This will also flush the list of summaries so that none are added twice
writer.flush()
# We're done! Close down our Session and SummaryWriter to tidy up.
# Note that SummaryWriter.close() automatically calls flush(), so any summaries left will be written to disk
sess.close()
writer.close()
# Start TensorBoard
start_tensorboard()
# Once you are done, stop TensorBoard
stop_tensorboard()
# 0-D tensor (scalar)
t_0d_py = 4
# 1-D tensor (vector)
t_1d_py = [1, 2, 3]
# 2-D tensor (matrix)
t_2d_py = [[1, 2],
[3, 4],
[5, 6]]
# 3-D tensor
t_3d_py = [[[0, 0], [0, 1], [0, 2]],
[[1, 0], [1, 1], [1, 2]],
[[2, 0], [2, 1], [2, 2]]]
# 0-D tensor (scalar)
t_0d_np = np.array(4, dtype=np.int32)
# 1-D tensor (vector)
t_1d_np = np.array([1, 2, 3], dtype=np.int64)
# 2-D tensor (matrix)
t_2d_np = np.array([[1, 2],
[3, 4],
[5, 6]],
dtype=np.float32)
# 3-D tensor
t_3d_np = np.array([[[0, 0], [0, 1], [0, 2]],
[[1, 0], [1, 1], [1, 2]],
[[2, 0], [2, 1], [2, 2]]],
dtype=np.int32)
# Just to show that they are equivalent
(tf.float32 == np.float32 and
tf.float64 == np.float64 and
tf.int8 == np.int8 and
tf.int16 == np.int16 and
tf.int32 == np.int32 and
tf.int64 == np.int64 and
tf.uint8 == np.uint8 and
tf.bool == np.bool and
tf.complex64 == np.complex64)
tf_string_tensor = [b"first", b"second", b"third"]
# Shapes corresponding to scalars
# Note that either lists or tuples can be used
s_0d_list = []
s_0d_tuple = ()
# Shape corresponding to a vector of length 3
s_1d = [3]
# Shape corresponding to a 2-by-3 matrix
s_2d = (2, 3)
# Shape corresponding to a 4-by-4-by-4 cube tensor
s_3d = [4, 4, 4]
s_var = [None, 4, 4]
with tf.Session() as sess:
get_shape = tf.shape([[[1, 2, 3], [1, 2, 3]],
[[2, 4, 6], [2, 4, 6]],
[[3, 6, 9], [3, 6, 9]],
[[4, 8, 12], [4, 8, 12]]])
shape = sess.run(get_shape)
print("Shape of tensor: " + str(shape))
my_const = tf.constant(np.array([1, 2, 3], dtype=np.float32))
# Initialize some tensors
a = np.array([1, 2], dtype=np.int32)
b = np.array([3, 4], dtype=np.int32)
# `tf.add()` creates an "add" Operation and places it in the graph
# The variable `c` will be a handle to the output of the operation
# This output can be passed on to other Operations!
c = tf.add(a, b)
sess = tf.Session()
print(sess.run(c))
c_result = sess.run(c)
c = tf.add(a, b, name="my_add_operation")
# Create a new graph - constructor takes no parameters
new_graph = tf.Graph()
#DEFAULT GRAPH
co = tf.constant(4)
with new_graph.as_default():
a = tf.add(3, 4)
b = tf.mul(a, 2)
other_co = tf.constant(6)
sess = tf.Session(graph=tf.get_default_graph())
sess.run(co)
default_graph = tf.get_default_graph()
with new_graph.as_default():
print(new_graph is tf.get_default_graph())
print(new_graph is tf.get_default_graph())
# A session with the default graph launched
# Equivalent to `tf.Session(graph=tf.get_default_graph())`
sess_default = tf.Session()
# A session with new_graph launched
sess_new = tf.Session(graph=new_graph)
sess_default.run(tf.add(3,2))
# Create Operations, Tensors, etc (using the default graph)
a = tf.add(3, 4)
b = tf.mul(a, 5)
# Define a dictionary that says to replace the value of `a` with 15
replace_dict = {a: 15}
# Run the session without feed_dict
# Prints (3 + 4) * 5 = 35
print(sess_default.run(b))
# Run the session, passing in `replace_dict` as the value to `feed_dict`
# Prints 15 * 5 = 75 instead of 7 * 5 = 35
print(sess_default.run(b, feed_dict=replace_dict))
a = tf.placeholder(tf.int32, name="my_placeholder")
b = tf.add(a, 3)
# This raises an error:
try:
sess_default.run(b)
except tf.errors.InvalidArgumentError as e:
print(e.message)
# Create feed dictionary
feed_dict = {a: 8}
# Now it works!
print(sess_default.run(b, feed_dict=feed_dict))
# Closing out the Sessions we opened up
sess_default.close()
sess_new.close()
my_var = tf.Variable(0, name="my_var")
sess = tf.Session()
sess.run(tf.initialize_all_variables())
increment = my_var.assign(my_var + 1)
for i in range(10):
print(sess.run(increment))
# Re-initialize variables
sess.run(tf.initialize_all_variables())
# Start incrementing, beginning from 0 again
for i in range(10):
print(sess.run(increment))
sess.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: TensorBoard Jupyter Notebook Helpers
Step4: TensorFlow Fundamentals
Step5: My First TensorFlow Graph
Step 1
Step6: Step 2
Step7: Step 3ish
Step8: Go to your server's ip at port 6006 (replace 1.2.3.4 with your server's ip)
Step9: Here's the main code all together without as many comments in the way
Step10: NumPy Arrays
Step11: Data types
Step12: The primary exception to when you should not use np.array() is when defining a Tensor of strings. When using strings, just use standard Python lists. It's best practice to include the b prefix in front of strings to explicitly define the strings as byte-arrays
Step13: Tensor Shapes
Step14: You can use the tf.shape Operation to get the shape value of Tensor objects
Step15: Constants
Step16: If a set of values is going to be reused all throughout your graph, using constants is an easy way to place that value directly into the graph (instead of reading from a NumPy array or Python list directly)
Step17: The important thing to remember is that Operations do not execute when created- that's the reason tf.add([1, 2],[3, 4]) doesn't return the value [4, 6] immediately. It must be passed into a Session.run() method, which we'll cover in more detail below.
Step18: The majority of the TensorFlow API is Operations. tf.scalar_summary and tf.placeholder were both Operations we used in the first example- remember that we had to run the out_summary variable in Session.run()
Step19: Getting into the habit of adding names to your Operations now will save you headaches later on.
Step20: However, operations (such as tf.add and tf.mul) are added to the default graph when created. To add operations to your new graph, use a with statement along with the graph's as_default() method. This makes that graph the default while inside of the with block
Step21: The default graph, other than being set to the default, is no different than any other Graph. If you need to get a handle to the default graph, use the tf.get_default_graph function
Step22: Note
Step23: Most TensorFlow models will not require more than one graph per script. However, you may find this useful when defining two independent models side-by-side. Additionally, there are mechanisms to export and import external models and load them in as Graph objects, which can allow you to feed the output of existing models into your new model (or vice versa). We won't be able to demonstrate these now, but see Graph.as_graph_def() and tf.import_graph_def in the TensorFlow API for more information.
Step24: Running Sessions
Step25: TensorFlow will only perform calculations necessary to compute the values specified in fetches, so it won't waste time if you only need to run a small part of a large, complicated graph.
Step26: When using placeholders,TensorFlow insists that any calls to Session.run() include feed_dict values for all placeholders
Step27: TensorFlow Variables
Step28: However, even though the object has been created, the value of the Variable has to be initialized separately with either of the tf.initialize_variables() or, more commonly, tf.initialize_all_variables() Operations. Remember that Operations must be passed into Session.run() to be executed
Step29: Having value initialization separated from object creation allows us to reinitialize the variable later if we'd like.
Step30: You may notice that if you run the previous code multiple times in the notebook, the value persists and continues to climb. The Variable's state is maintained by the Session object, and the state will persist unless either the session is close, the Variable is re-initialized, or a new value is assigned to the Variable.
Step31: Trainable
|
7,608
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'bcc', 'sandbox-1', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
7,609
|
<ASSISTANT_TASK:>
Python Code:
!pip install --upgrade watson_developer_cloud
import requests
import json
import os
from os.path import join, dirname
from watson_developer_cloud import SpeechToTextV1
# @hidden_cell
url = "https://stream.watsonplatform.net/speech-to-text/api/v1/recognize"
username= "$USERNAME"
password= "$PASSWORD"
file1 = "https://github.com/krondor/nlp-dsx-pot/raw/master/aging.mp3"
file2 = "http://podcast.c-span.org/podcast/SBHAR1020.mp3"
file3 = "https://github.com/krondor/nlp-dsx-pot/raw/master/reagan-thatcher.mp3"
!wget {file1} -O aging.mp3 -nc
# Define Local File for CURL
filepath = './reagan-thatcher.mp3'
!curl -X POST -u {username}:{password} \
--header "Content-Type: audio/mp3" \
--data-binary @{filepath} \
"https://stream.watsonplatform.net/speech-to-text/api/v1/recognize"
!wget {file3} -O reagan-thatcher.mp3 -nc
filename = os.path.basename(filepath)
audio = open(filename,'rb')
files_input = {
"audioFile":(filename,audio,'audio/mp3')
}
# Define Speech to Text Feature Parameters
params = (
('model', 'en-US_NarrowbandModel'),
('speaker_labels', 'true')
)
response = requests.post(
url,
params=params,
auth=(username, password),
headers={"Content-Type": "audio/mp3"},
files=files_input)
response_data = response.json()
print('status_code: {} (reason: {})'.format(response.status_code, response.reason))
import pandas as pd
data = []
for item in response_data['results']:
for trans in item['alternatives']:
data.append(dict({'transcript':trans['transcript'], 'confidence':trans['confidence']}))
# Create Pandas Data Frame of Transcript Results with Confidence
df = pd.DataFrame(data)
# View Snippet
df.head(5)
%matplotlib inline
import numpy as np
import matplotlib
matplotlib.style.use('ggplot')
plt.figure();
df['confidence'].plot.hist()
speech_to_text = SpeechToTextV1(
username='d9f7864a-0869-40ee-98af-58e23e996a0e',
password='nYmlwq7VBTZz',
x_watson_learning_opt_out=False
)
!wget {file1} -O aging.mp3 -nc
filepath = './aging.mp3' # path to file
filename = os.path.basename(filepath)
print(json.dumps(speech_to_text.models(), indent=2))
print(json.dumps(speech_to_text.get_model('en-US_BroadbandModel'), indent=2))
with open(filename, 'rb') as audio_file:
print(json.dumps(speech_to_text.recognize(
audio_file, content_type='audio/mp3', timestamps=True,
word_confidence=True, speaker_labels=True),
indent=2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Authentication Handling and File Details
Step2: Basic transcription with CURL
Step3: Output Handling with Requests
Step4: Pandas from Results
Step5: Confidence Spread
Step6: Speech to Text with Watson Developer Cloud SDK
|
7,610
|
<ASSISTANT_TASK:>
Python Code:
fname = io.download_occultation_times(outdir='../data/')
print(fname)
tlefile = io.download_tle(outdir='../data')
print(tlefile)
times, line1, line2 = io.read_tle_file(tlefile)
tstart = '2018-09-27T12:00:00'
tend = '2018-09-29T12:10:00'
orbits = planning.sunlight_periods(fname, tstart, tend)
pa = planning.get_nustar_roll(tstart, 0)
print("NuSTAR Roll angle for Det0 in NE quadrant: {}".format(pa))
offset = [0., 0.]*u.arcsec
for ind, orbit in enumerate(orbits):
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sky_pos = planning.get_skyfield_position(midTime, offset, parallax_correction=True)
print("Orbit: {}".format(ind))
print("Orbit start: {} Orbit end: {}".format(orbit[0].isoformat(), orbit[1].isoformat()))
print('Aim time: {} RA (deg): {} Dec (deg): {}'.format(midTime.isoformat(), sky_pos[0], sky_pos[1]))
print("")
# Just use the first orbit...or choose one. This may download a ton of deltat.preds, which is a known
# bug to be fixed.
orbit = orbits[20]
planning.make_mosaic(orbit, write_output=True, make_regions=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download the NuSTAR TLE archive.
Step2: Here is where we define the observing window that we want to use.
Step3: We want to know how to orient NuSTAR for the Sun.
Step4: Set up the offset you want to use here
Step5: Loop over each orbit and correct the pointing for the same heliocentric pointing position.
Step6: This is where you actually make the Mosaic
|
7,611
|
<ASSISTANT_TASK:>
Python Code:
import mne
from mne.preprocessing import maxwell_filter
data_path = mne.datasets.sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
ctc_fname = data_path + '/SSS/ct_sparse_mgh.fif'
fine_cal_fname = data_path + '/SSS/sss_cal_mgh.dat'
raw = mne.io.read_raw_fif(raw_fname, add_eeg_ref=False)
raw.info['bads'] = ['MEG 2443', 'EEG 053', 'MEG 1032', 'MEG 2313'] # set bads
# Here we don't use tSSS (set st_duration) because MGH data is very clean
raw_sss = maxwell_filter(raw, cross_talk=ctc_fname, calibration=fine_cal_fname)
tmin, tmax = -0.2, 0.5
event_id = {'Auditory/Left': 1}
events = mne.find_events(raw, 'STI 014')
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
include=[], exclude='bads')
for r, kind in zip((raw, raw_sss), ('Raw data', 'Maxwell filtered data')):
epochs = mne.Epochs(r, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(eog=150e-6),
preload=False)
evoked = epochs.average()
evoked.plot(window_title=kind, ylim=dict(grad=(-200, 250),
mag=(-600, 700)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Preprocess with Maxwell filtering
Step3: Select events to extract epochs from, pick M/EEG channels, and plot evoked
|
7,612
|
<ASSISTANT_TASK:>
Python Code:
# THINGS TO IMPORT
# This is a baseline set of libraries I import by default if I'm rushed for time.
%matplotlib inline
import codecs # load UTF-8 Content
import json # load JSON files
import pandas as pd # Pandas handles dataframes
import numpy as np # Numpy handles lots of basic maths operations
import matplotlib.pyplot as plt # Matplotlib for plotting
import seaborn as sns # Seaborn for beautiful plots
from dateutil import * # I prefer dateutil for parsing dates
import math # transformations
import statsmodels.formula.api as smf # for doing statistical regression
import statsmodels.api as sm # access to the wider statsmodels library, including R datasets
from collections import Counter # Counter is useful for grouping and counting
import scipy
from patsy import dmatrices
# High School and Beyond Dataset
# https://nces.ed.gov/surveys/hsb/
import urllib2
import os.path
if(os.path.isfile("hsb.dta")!=True):
response = urllib2.urlopen("http://www.stata-press.com/data/mlmus3/hsb.dta")
if(response.getcode()==200):
f = open("hsb.dta","w")
f.write(response.read())
f.close()
hsb_df = pd.read_stata("hsb.dta")
print hsb_df[['mathach','ses']].describe()
print
print "CROSSTAB"
print pd.crosstab(hsb_df['sector'], [hsb_df['female'],hsb_df['minority']])
#generate de-meaned mathach
sgp = school_gp.to_dict()
def school_mathach(f):
return float(f.mathach) - sgp['mathach'][f.schoolid]
hsb_df['school_mathach'] = hsb_df.apply(school_mathach, 1)
#make the Side-by-Side Boxplot
fig = plt.figure(num=None, figsize=(8, 20), dpi=80, edgecolor='k')
ax = fig.add_subplot(121)
hsb_df.boxplot("mathach", by="schoolid", ax=ax, vert=False)
plt.title("School Math Achievement", fontsize="16")
ax2 = fig.add_subplot(122)
hsb_df.boxplot("school_mathach", by="schoolid", ax=ax2, vert=False)
plt.title("De-Meaned School Math Achievement", fontsize="16")
plt.show()
result = smf.ols(formula = "mathach ~ ses",
data = hsb_df).fit()
print "==========================================================="
print "MODEL 1: Regressing Student Math Achievement on Student SES"
print result.summary()
plt.figure(num=None, figsize=(12, 6), dpi=80, facecolor='w', edgecolor='k')
plt.scatter(hsb_df.ses, hsb_df.mathach, marker=".", color="c")
student_line, = plt.plot(hsb_df['ses'], result.predict(), "-", color="c")
#plt.title("Predicting Math Achievement from SES Across all 7185 students", fontsize="16")
school_gp = hsb_df.groupby("schoolid").aggregate(np.mean)
result = smf.ols(formula = "mathach ~ ses",
data = school_gp).fit()
print
print "==================================================================="
print "MODEL 2: Regressing Mean School Math Achievement on Mean School SES"
print result.summary()
#plt.figure(num=None, figsize=(12, 6), dpi=80, facecolor='w', edgecolor='k')
plt.scatter(school_gp.ses, school_gp.mathach, marker=".", color="r")
school_line, = plt.plot(school_gp.ses, result.predict(), "-", color="r")
plt.title("Predicting Math Achievement Scores from SES with Linear Regression", fontsize="16")
plt.legend([student_line, school_line], ['All Students', 'School Means'], fontsize="14")
plt.show()
# calculate the demeaned_ses for each student
def demeaned_ses(f):
return f.ses - school_gp.to_dict()['ses'][f['schoolid']]
# add the school mean SES to the dataframe for each student
def schoolmean_ses(f):
return school_gp.to_dict()['ses'][f['schoolid']]
hsb_df['demeaned_ses'] = hsb_df.apply(demeaned_ses, axis=1)
hsb_df['schoolmean_ses'] = hsb_df.apply(schoolmean_ses, axis=1)
result_school_covary = smf.ols(formula = "mathach ~ ses + schoolmean_ses",
data = hsb_df).fit()
print "MODEL: Regressing Student Math Achievement on De-meaned Student SES"
print result_school_covary.params
result = smf.ols(formula = "mathach ~ demeaned_ses",
data = hsb_df).fit()
print
print "MODEL: Regressing Student Math Achievement on De-meaned Student SES"
print result.params
print
print "Notice how the slope for *ses* is the same as the slope for *demeaned_ses* in the two models"
print
plt.figure(num=None, figsize=(12, 6), dpi=80, facecolor='w', edgecolor='k')
plt.scatter(hsb_df.demeaned_ses, hsb_df.mathach, marker=".", color="darkgrey")
student_line, = plt.plot(hsb_df['demeaned_ses'], result.predict(), "-", color="darkgrey")
plt.title("Predicting Math Achievement Scores from De-meaned SES", fontsize="16")
plt.xlabel("De-meaned Socio-Economic Status", fontsize="14")
plt.ylabel("Math Achivement", fontsize="14")
plt.show()
# highlight the maximum, and minimum
max_school = school_gp[school_gp['ses'] == school_gp.ses.max()].index[0]
min_school = school_gp[school_gp['ses'] == school_gp.ses.min()].index[0]
hsb_df['fixed_preds'] = result_school_covary.predict()
plt.figure(num=None, figsize=(12, 6), dpi=80, edgecolor='k')
for schoolid in hsb_df.schoolid.unique():
if(schoolid!=max_school and schoolid!=min_school):
plt.scatter(hsb_df[hsb_df.schoolid == schoolid].ses, hsb_df[hsb_df.schoolid == schoolid].mathach, marker=".", color="lightgrey")
for schoolid in hsb_df.schoolid.unique():
if(schoolid == max_school):
plt.scatter(hsb_df[hsb_df.schoolid == schoolid].ses, hsb_df[hsb_df.schoolid == schoolid].mathach, marker=".", color="r")
maxline, = plt.plot(hsb_df[hsb_df.schoolid == schoolid].ses, hsb_df[hsb_df.schoolid == schoolid].fixed_preds, "-", color="r")
elif(schoolid == min_school):
plt.scatter(hsb_df[hsb_df.schoolid == schoolid].ses, hsb_df[hsb_df.schoolid == schoolid].mathach, marker=".", color="b")
minline, = plt.plot(hsb_df[hsb_df.schoolid == schoolid].ses, hsb_df[hsb_df.schoolid == schoolid].fixed_preds, "-", color="b")
plt.legend([maxline, minline], ['School with Max SES', 'School with Min SES'], fontsize="12")
plt.title("Fixed Effects Model Predicting Math Achievement Scores from SES & School Mean SES", fontsize="16")
plt.xlabel("Socio-Economic Status", fontsize="14")
plt.ylabel("Math Achivement", fontsize="14")
plt.show()
##http://statsmodels.sourceforge.net/devel/mixed_linear.html
md = smf.mixedlm("mathach ~ ses", data=hsb_df, groups=hsb_df["schoolid"])
result = md.fit()
print result.summary()
plt.figure(num=None, figsize=(12, 6), dpi=80, facecolor='w', edgecolor='k')
result = smf.ols(formula = "mathach ~ ses",
data = hsb_df).fit()
print "MODEL 1: Regressing Student Math Achievement on Student SES"
plt.scatter(hsb_df.ses, hsb_df.mathach, marker=".", color="c")
student_line, = plt.plot(hsb_df['ses'], result.predict(), "-", color="c")
school_gp = hsb_df.groupby("schoolid").aggregate(np.mean)
result = smf.ols(formula = "mathach ~ ses",
data = school_gp).fit()
print result.summary()
print
print "MODEL 2: Regressing Mean School Math Achievement on Mean School SES"
print result.summary()
#plt.figure(num=None, figsize=(12, 6), dpi=80, facecolor='w', edgecolor='k')
plt.scatter(school_gp.ses, school_gp.mathach, marker=".", color="r")
school_line, = plt.plot(school_gp.ses, result.predict(), "-", color="r")
result = smf.ols(formula = "mathach ~ demeaned_ses",
data = hsb_df).fit()
print "MODEL 3: Regressing Student Math Achievement on De-meaned Student SES"
print result.summary()
#plt.figure(num=None, figsize=(12, 6), dpi=80, facecolor='w', edgecolor='k')
demeaned_line, = plt.plot(hsb_df['demeaned_ses'], result.predict(), "-", color="darkgrey")
print
print "MODEL 4: Regressing Student Math Achievement on Student SES Grouped by School in a Random Effects Model"
md = smf.mixedlm("mathach ~ ses", data=hsb_df, groups=hsb_df["schoolid"])
result = md.fit()
print result.summary()
def predict(x, key, result):
return result.params.Intercept + result.params['ses']*x
ses = np.linspace(hsb_df.ses.min(), hsb_df.ses.max(), 100)
preds = [predict(x, 'ses',result) for x in ses]
multi_line, = plt.plot(ses, preds, "-", color="m")
plt.title("Predicting Math Achievement Scores from SES (schools=160) (students=7185)", fontsize="16")
plt.legend([student_line, school_line, multi_line, demeaned_line], ['All Students (Total)', 'School Means (Between)', "Random Effects", "De-Meaned (within group, Fixed)"])
plt.show()
##http://statsmodels.sourceforge.net/devel/mixed_linear.html
md = smf.mixedlm("mathach ~ ses", data=hsb_df, groups=hsb_df["schoolid"])
result = md.fit()
print result.summary()
#store the model results to a variable
models = {}
m = "Model1"
models[m] = {}
models[m]['result'] = result
def individual_residuals(f):
observed_individual = f.mathach
predicted_individual = result.params.Intercept + result.params['ses']*f.ses
return observed_individual - predicted_individual
def group_residuals(f):
observed_group = school_gp.to_dict()['mathach'][f.schoolid]
predicted_group = result.params.Intercept + result.params['ses']*f.schoolmean_ses
return predicted_group - observed_group
group_count = school_gp.count()[0]
indiv_count = hsb_df.count()[0]
resid_u = hsb_df.apply(group_residuals, 1)
models[m]["sigma_u"] = np.std(resid_u)
models[m]["sigma_u_err"] = models[m]["sigma_u"]/math.sqrt(group_count)
resid_e = hsb_df.apply(individual_residuals, 1)
models[m]["sigma_e"] = np.std(resid_e)
models[m]["sigma_e_err"] = models[m]["sigma_e"]/math.sqrt(indiv_count)
models[m]["icc"] = math.pow(models[m]["sigma_u"],2)/(math.pow(models[m]["sigma_u"],2) + math.pow(models[m]["sigma_e"],2))
models[m]["icc_err"] = icc/math.sqrt(group_count)
print " stdev stderr"
print "sigma_u (between group variation): %(s).04f %(e).04f" % {'s':models[m]["sigma_u"],
'e':models[m]["sigma_u_err"]}
print "sigma_e (within group variation): %(s).04f %(e).04f" % {'s':models[m]["sigma_e"],
'e':models[m]["sigma_e_err"]}
print "intraclass correlation: %(i).04f %(e).04f" % {'i':models[m]["icc"],
'e':models[m]["icc_err"]}
print
print "Z-Test of intraclass correlation:"
print " H0: icc = 0 in the population"
print " test-statistic: z=icc/SE(icc)"
print " decision rule: z>z_crit"
print " critical value: 1.96"
print " z = %(z).04f" %{'z':models[m]["icc"] /models[m]["icc_err"]}
# now generate the baseline model
md = smf.mixedlm("mathach ~ 1", data=hsb_df, groups=hsb_df["schoolid"])
result = md.fit()
print result.summary()
def individual_residuals(f):
observed_individual = f.mathach
predicted_individual = result.params.Intercept
return observed_individual - predicted_individual
def group_residuals(f):
observed_group = school_gp.to_dict()['mathach'][f.schoolid]
predicted_group = result.params.Intercept
return predicted_group - observed_group
group_count = school_gp.count()[0]
indiv_count = hsb_df.count()[0]
m = "Model0"
models[m] = {}
models[m]['result'] = result
resid_u = hsb_df.apply(group_residuals, 1)
models[m]["sigma_u"] = np.std(resid_u)
models[m]["sigma_u_err"] = models[m]["sigma_u"]/math.sqrt(group_count)
resid_e = hsb_df.apply(individual_residuals, 1)
models[m]["sigma_e"] = np.std(resid_e)
models[m]["sigma_e_err"] = models[m]["sigma_e"]/math.sqrt(indiv_count)
models[m]["icc"] = math.pow(models[m]["sigma_u"],2)/(math.pow(models[m]["sigma_u"],2) + math.pow(models[m]["sigma_e"],2))
models[m]["icc_err"] = icc/math.sqrt(group_count)
print " stdev stderr"
print "sigma_u (between group variation): %(s).04f %(e).04f" % {'s':models[m]["sigma_u"],
'e':models[m]["sigma_u_err"]}
print "sigma_e (within group variation): %(s).04f %(e).04f" % {'s':models[m]["sigma_e"],
'e':models[m]["sigma_e_err"]}
print "intraclass correlation: %(i).04f %(e).04f" % {'i':models[m]["icc"],
'e':models[m]["icc_err"]}
print
print "Z-Test of intraclass correlation:"
print " H0: icc = 0 in the population"
print " test-statistic: z=icc/SE(icc)"
print " decision rule: z>z_crit"
print " critical value: 1.96"
print " z = %(z).04f" %{'z':models[m]["icc"] /models[m]["icc_err"]}
m0 = "Model0"
m1 = "Model1"
r2_u = math.pow(models[m0]['sigma_u'], 2) - math.pow(models[m1]['sigma_u'], 2)/math.pow(models[m0]['sigma_u'], 2)
print "Pseudo R^2 for group variation: %(r).03f%%" % {'r':r2_u}
r2_e = math.pow(models[m0]['sigma_e'], 2) - math.pow(models[m1]['sigma_e'], 2)/math.pow(models[m0]['sigma_e'], 2)
print "Pseudo R^2 for individual variation: %(r).03f%%" % {'r':r2_e}
# in this dataset, sector refers to whether the school is catholic(1) or public(0)
from patsy import dmatrices
md = smf.mixedlm("mathach ~ ses + sector", data=hsb_df, groups=hsb_df["schoolid"])
result = md.fit()
print result.summary()
def individual_residuals(f):
observed_individual = f.mathach
predicted_individual = result.params.Intercept + result.params['ses']*f.ses + result.params['sector']*f.sector
return observed_individual - predicted_individual
def group_residuals(f):
observed_group = school_gp.to_dict()['mathach'][f.schoolid]
predicted_group = result.params.Intercept + result.params['ses']*f.schoolmean_ses + result.params['sector']*f.sector
return predicted_group - observed_group
group_count = school_gp.count()[0]
indiv_count = hsb_df.count()[0]
m = "Model2"
models[m] = {}
models[m]['result'] = result
resid_u = hsb_df.apply(group_residuals, 1)
models[m]["sigma_u"] = np.std(resid_u)
models[m]["sigma_u_err"] = models[m]["sigma_u"]/math.sqrt(group_count)
resid_e = hsb_df.apply(individual_residuals, 1)
models[m]["sigma_e"] = np.std(resid_e)
models[m]["sigma_e_err"] = models[m]["sigma_e"]/math.sqrt(indiv_count)
models[m]["icc"] = math.pow(models[m]["sigma_u"],2)/(math.pow(models[m]["sigma_u"],2) + math.pow(models[m]["sigma_e"],2))
models[m]["icc_err"] = icc/math.sqrt(group_count)
print " stdev stderr"
print "sigma_u (between group variation): %(s).04f %(e).04f" % {'s':models[m]["sigma_u"],
'e':models[m]["sigma_u_err"]}
print "sigma_e (within group variation): %(s).04f %(e).04f" % {'s':models[m]["sigma_e"],
'e':models[m]["sigma_e_err"]}
print "intraclass correlation: %(i).04f %(e).04f" % {'i':models[m]["icc"],
'e':models[m]["icc_err"]}
print
print "Z-Test of intraclass correlation:"
print " H0: icc = 0 in the population"
print " test-statistic: z=icc/SE(icc)"
print " decision rule: z>z_crit"
print " critical value: 1.96"
print " z = %(z).04f" %{'z':models[m]["icc"] /models[m]["icc_err"]}
print
m0 = "Model0"
m1 = "Model2"
r2_u = math.pow(models[m0]['sigma_u'], 2) - math.pow(models[m1]['sigma_u'], 2)/math.pow(models[m0]['sigma_u'], 2)
print "Pseudo R^2 for group variation: %(r).03f%%" % {'r':r2_u}
r2_e = math.pow(models[m0]['sigma_e'], 2) - math.pow(models[m1]['sigma_e'], 2)/math.pow(models[m0]['sigma_e'], 2)
print "Pseudo R^2 for individual variation: %(r).03f%%" % {'r':r2_e}
# in this dataset, sector refers to whether the school is catholic(1) or public(0)
from patsy import dmatrices
md = smf.mixedlm("mathach ~ ses + sector + sector:ses", data=hsb_df, groups=hsb_df["schoolid"])
result = md.fit()
print result.summary()
def individual_residuals(f):
observed_individual = f.mathach
predicted_individual = result.params.Intercept + result.params['ses']*f.ses + result.params['sector']*f.sector + result.params['sector:ses']*f.sector*f.ses
return observed_individual - predicted_individual
def group_residuals(f):
observed_group = school_gp.to_dict()['mathach'][f.schoolid]
predicted_group = result.params.Intercept + result.params['ses']*f.schoolmean_ses + result.params['sector']*f.sector + result.params['sector:ses']*f.sector*f.ses
return predicted_group - observed_group
group_count = school_gp.count()[0]
indiv_count = hsb_df.count()[0]
m = "Model3"
models[m] = {}
models[m]['result'] = result
resid_u = hsb_df.apply(group_residuals, 1)
models[m]["sigma_u"] = np.std(resid_u)
models[m]["sigma_u_err"] = models[m]["sigma_u"]/math.sqrt(group_count)
resid_e = hsb_df.apply(individual_residuals, 1)
models[m]["sigma_e"] = np.std(resid_e)
models[m]["sigma_e_err"] = models[m]["sigma_e"]/math.sqrt(indiv_count)
models[m]["icc"] = math.pow(models[m]["sigma_u"],2)/(math.pow(models[m]["sigma_u"],2) + math.pow(models[m]["sigma_e"],2))
models[m]["icc_err"] = icc/math.sqrt(group_count)
print " stdev stderr"
print "sigma_u (between group variation): %(s).04f %(e).04f" % {'s':models[m]["sigma_u"],
'e':models[m]["sigma_u_err"]}
print "sigma_e (within group variation): %(s).04f %(e).04f" % {'s':models[m]["sigma_e"],
'e':models[m]["sigma_e_err"]}
print "intraclass correlation: %(i).04f %(e).04f" % {'i':models[m]["icc"],
'e':models[m]["icc_err"]}
print
print "Z-Test of intraclass correlation:"
print " H0: icc = 0 in the population"
print " test-statistic: z=icc/SE(icc)"
print " decision rule: z>z_crit"
print " critical value: 1.96"
print " z = %(z).04f" %{'z':models[m]["icc"] /models[m]["icc_err"]}
print
m0 = "Model0"
m1 = "Model3"
r2_u = math.pow(models[m0]['sigma_u'], 2) - math.pow(models[m1]['sigma_u'], 2)/math.pow(models[m0]['sigma_u'], 2)
print "Pseudo R^2 for group variation: %(r).02f%%" % {'r':r2_u}
r2_e = math.pow(models[m0]['sigma_e'], 2) - math.pow(models[m1]['sigma_e'], 2)/math.pow(models[m0]['sigma_e'], 2)
print "Pseudo R^2 for individual variation: %(r).02f%%" % {'r':r2_e}
#step one: find prototypical values of a catholic and a public school with an SES of 0.
school_gp['p_abs_ses']=school_gp[np.isclose(school_gp.sector, 0.)].ses.map(lambda x: abs(x))
school_gp['c_abs_ses']=school_gp[np.isclose(school_gp.sector, 1.)].ses.map(lambda x: abs(x))
#public school with SES closest to 0: 1946
print school_gp[(np.isclose(school_gp.p_abs_ses,school_gp.p_abs_ses.min())) & (np.isclose(school_gp.sector, 0.))].ses
#catholic school with SES closest to 0: 5650
print school_gp[(np.isclose(school_gp.c_abs_ses,school_gp.c_abs_ses.min())) & (np.isclose(school_gp.sector, 1.))].ses
p_school = 1946
c_school = 5650
def predict(f):
return result.params.Intercept + result.params['ses']*f.ses + result.params['sector']*f.sector + result.params['sector:ses']*f.sector*f.ses
hsb_df['interaction_preds'] = hsb_df.apply(predict, 1)
plt.figure(num=None, figsize=(12, 6), dpi=80, edgecolor='k')
# PLOT A PREDICTION OF INDIVIDUAL MATH ACHIEVEMENT SCORES
# FOR TWO SCHOOLS
for schoolid in hsb_df.schoolid.unique():
if(schoolid!=max_school and schoolid!=min_school):
plt.scatter(hsb_df[hsb_df.schoolid == schoolid].ses, hsb_df[hsb_df.schoolid == schoolid].mathach, marker=".", color="lightgrey")
for schoolid in hsb_df.schoolid.unique():
if(schoolid == p_school):
plt.scatter(hsb_df[hsb_df.schoolid == schoolid].ses, hsb_df[hsb_df.schoolid == schoolid].mathach, marker=".", color="r")
p_line, = plt.plot(hsb_df[hsb_df.schoolid == schoolid].ses, hsb_df[hsb_df.schoolid == schoolid].interaction_preds, "-", color="r")
elif(schoolid == c_school):
plt.scatter(hsb_df[hsb_df.schoolid == schoolid].ses, hsb_df[hsb_df.schoolid == schoolid].mathach, marker=".", color="b")
c_line, = plt.plot(hsb_df[hsb_df.schoolid == schoolid].ses, hsb_df[hsb_df.schoolid == schoolid].interaction_preds, "-", color="b")
plt.legend([c_line, p_line], ['Students in a Catholic School with Mean SES', 'Students in a Public School with Mean SES'], fontsize="12")
plt.suptitle("Predicting Individual Math Achievement Scores from SES & Sector", fontsize="16")
plt.title("in a Multi-Level Random Effects Model, where SES=0", fontsize="16")
plt.xlabel("Socio-Economic Status", fontsize="14")
plt.ylabel("Math Achivement", fontsize="14")
plt.show()
# PLOT SCHOOL MEAN CATHOLIC AND PUBLIC SCHOOL MATH ACHIEVEMENT
plt.figure(num=None, figsize=(12, 6), dpi=80, edgecolor='k')
plt.scatter(hsb_df.ses, hsb_df.mathach, marker=".", color="lightgrey")
plt.scatter(school_gp[school_gp.sector==0.].ses, school_gp[school_gp.sector==0.].mathach, color="r")
plt.scatter(school_gp[school_gp.sector==1.].ses, school_gp[school_gp.sector==1.].mathach, color="b")
school_gp['interaction_preds'] = school_gp.apply(predict, 1)
c_line, = plt.plot(school_gp[np.isclose(school_gp.sector, 1.)].ses, school_gp[np.isclose(school_gp.sector, 1.)].interaction_preds, "-", color="b")
p_line, = plt.plot(school_gp[np.isclose(school_gp.sector, 0.)].ses, school_gp[np.isclose(school_gp.sector, 0.)].interaction_preds, "-", color="r")
plt.suptitle("Predicting School Math Achievement Scores from SES & Sector", fontsize="16")
plt.title("in a Multi-Level Random Effects Model", fontsize="16")
plt.legend([c_line, p_line], ['Catholic Schools', 'Public Schools'], fontsize="12")
plt.xlabel("Socio-Economic Status", fontsize="14")
plt.ylabel("Math Achivement", fontsize="14")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exploring Within-Group Variation and Between-Group Variation
Step2: Predicting Math Achievement from SES with Linear Models
Step3: Fixed Effect Model
Step4: Plotting the Fixed Effects Model for Individual Schools
Step5: Random Effects Model
Step6: In the above results, the ses coefficient of 2.390 is
Step7: Calculate Within Group Variance, Between Group Variance, and Intraclass Correlation of a Random Effects Models
Step8: In this case, we see that there is a low intraclass correlation, suggesting that most of the variation in math achievement scores is within schools, but that there is a significant difference between the math achievement of schools on average in the model as well (as indicated by the Z test).
Step9: Calculating Pseudo-$R^{2}$
Step10: In the above pseudo $R^{2}$ calculations, we see that our model of math achievement on SES accounts for 8.44% of the between-group variation and 46.43% of the within-group variation. This is consistent with our intraclass correlation, which shows that in the model there is much more within-group variation than between-group variation.
Step11: Now add an interaction between sector and SES
Step12: Plotting the Random Effects Model with a Level 2 Interaction
Step13: Plot Predictions for Catholic and Public Schools
|
7,613
|
<ASSISTANT_TASK:>
Python Code:
_MIN = - 2147483648
_MAX = 2147483648
class getnode:
    """A single binary-tree node: a payload plus left/right child links."""

    def __init__(self, data):
        # Children start out absent; callers wire them up explicitly.
        self.data = data
        self.left = None
        self.right = None
def getlevel(root, data):
    """Return the 1-based depth of the first node (in BFS order) whose
    payload equals ``data``, or 0 if no such node exists (including when
    ``root`` is None).
    """
    from collections import deque  # O(1) popleft; list.pop(0) was O(n) per pop
    if root is None:
        return 0
    q = deque([root])
    level = 1
    while q:
        # Drain exactly one tree level per pass, instead of tracking level
        # boundaries with a None sentinel in the queue.
        for _ in range(len(q)):
            node = q.popleft()
            if node.data == data:
                return level
            if node.left:
                q.append(node.left)
            if node.right:
                q.append(node.right)
        level += 1
    return 0
if __name__ == '__main__':  # BUG FIX: was ' __main __', which can never equal __name__
    # Build the demo tree:
    #            20
    #          /    \
    #        10      30
    #       /  \    /  \
    #      5   15  25  40
    #         /
    #        12
    root = getnode(20)
    root.left = getnode(10)
    root.right = getnode(30)
    root.left.left = getnode(5)
    root.left.right = getnode(15)
    root.left.right.left = getnode(12)
    root.right.left = getnode(25)
    root.right.right = getnode(40)
    # Report the level of a few values; 27 is deliberately absent.
    # (Repeated stanzas collapsed into a loop; message strings de-garbled
    # from tokenizer artifacts.)
    for value in (30, 12, 25, 27):
        level = getlevel(root, value)
        if level != 0:
            print("level of node %d is %d" % (value, level))
        else:
            print("node %d not found" % value)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,614
|
<ASSISTANT_TASK:>
Python Code:
import dataset as ds
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# Download database
ds.download('UCI HAR')
# Paths and filenames
DATASET_PATH = "../dataset/UCI HAR/UCI HAR Dataset"
TEST_RELPATH = "/test"
TRAIN_RELPATH = "/train"
VARS_FILENAMES = [
'body_acc_x_',
'body_acc_y_',
'body_acc_z_',
'body_gyro_x_',
'body_gyro_y_',
'body_gyro_z_',
'total_acc_x_',
'total_acc_y_',
'total_acc_z_']
LABELS_DEF_FILE = DATASET_PATH + "/activity_labels.txt"
# Make a list of files for training
trainFiles = [DATASET_PATH + TRAIN_RELPATH + '/Inertial Signals/' + var_filename + 'train.txt' for var_filename in VARS_FILENAMES]
# Make an tensor with data for training
dataTrain = ds.get_data(trainFiles, print_on = True)
# Show dataTrain dimensions
print dataTrain.shape
# Make a list of files for testing
testFiles = [DATASET_PATH + TEST_RELPATH + '/Inertial Signals/' + var_filename + 'test.txt' for var_filename in VARS_FILENAMES]
# Make an tensor with data for training
dataTest = ds.get_data(testFiles, print_on = True)
# Show dataTrain dimensions
print dataTest.shape
# Sensor 0 : Sample 1 (128 samples) (Training set)
fig = plt.figure()
plt.figure(figsize=(16,8))
dataTrain[0,1,:]
plt.plot(dataTrain[0,1,:])
plt.show()
# Sensor 1 : Sample 2 (128 samples) (Test set)
fig = plt.figure()
plt.figure(figsize=(16,8))
dataTest[1,2,:]
plt.plot(dataTest[1,2,:])
plt.show()
# Get the labels values for training samples
trainLabelsFile = DATASET_PATH + TRAIN_RELPATH + '/' + 'y_train.txt'
labelsTrain = ds.get_labels(trainLabelsFile, print_on = True)
print labelsTrain.shape #show dimension
# Get the labels values for testing samples
testLabelsFile = DATASET_PATH + TEST_RELPATH + '/' + 'y_test.txt'
labelsTest = ds.get_labels(testLabelsFile, print_on = True)
print labelsTest.shape #show dimension
# convert outputs to one-hot code
labelsTrainEncoded = ds.encode_onehot(labelsTrain)
labelsTestEncoded = ds.encode_onehot(labelsTest)
# Make a dictionary
labelDictionary = ds.make_labels_dictionary(LABELS_DEF_FILE)
print label_dict
print "\n"
sel = 300
print "label {} ({}) -> {}".format(labelsTrain[sel], label_dict[labelsTrain[sel]], labelsTrainEncoded[sel])
activityToPlot = 2.0
fig = plt.figure()
plt.figure(figsize=(16,8))
plt.title(label_dict[activityToPlot])
for idx, activity in enumerate(labelsTrain):
if activityToPlot == activity:
plt.plot(dataTrain[4,idx,:])
plt.show()
numLayers = 50;
lstm_cell = tf.contrib.rnn.BasicRNNCell(numLayers)
lstm_cell
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading Dataset
Step2: Filtered plots
Step3: RNN
|
7,615
|
<ASSISTANT_TASK:>
Python Code:
import pickle
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from BranchedGP import VBHelperFunctions as bplot
plt.style.use("ggplot")
%matplotlib inline
datafile = "syntheticdata/synthetic20.csv"
data = pd.read_csv(datafile, index_col=[0])
G = data.shape[1] - 2 # all data - time columns - state column
Y = data.iloc[:, 2:]
trueBranchingTimes = np.array([float(Y.columns[i][-3:]) for i in range(G)])
data.head()
f, ax = plt.subplots(5, 8, figsize=(10, 8))
ax = ax.flatten()
for i in range(G):
for s in np.unique(data["MonocleState"]):
idxs = s == data["MonocleState"].values
ax[i].scatter(data["Time"].loc[idxs], Y.iloc[:, i].loc[idxs])
ax[i].set_title(Y.columns[i])
ax[i].set_yticklabels([])
ax[i].set_xticklabels([])
f.suptitle("Branching genes, location=1.1 indicates no branching")
r = pickle.load(open("syntheticdata/syntheticDataRun.p", "rb"))
r.keys()
# plot fit for a gene
g = 0
GPy = Y.iloc[:, g][:, None]
GPt = data["Time"].values
globalBranching = data["MonocleState"].values.astype(int)
bmode = r["Bsearch"][np.argmax(r["gpmodels"][g]["loglik"])]
print("True branching time", trueBranchingTimes[g], "BGP Maximum at b=%.2f" % bmode)
_ = bplot.PlotBGPFit(GPy, GPt, r["Bsearch"], r["gpmodels"][g])
g = 0
bmode = r["Bsearch"][np.argmax(r["gpmodels"][g]["loglik"])]
pred = r["gpmodels"][g]["prediction"] # prediction object from GP
_ = bplot.plotBranchModel(
bmode,
GPt,
GPy,
pred["xtest"],
pred["mu"],
pred["var"],
r["gpmodels"][g]["Phi"],
fPlotPhi=True,
fColorBar=True,
fPlotVar=True,
)
fs, ax = plt.subplots(1, 1, figsize=(5, 5))
for g in range(G):
bmode = r["Bsearch"][np.argmax(r["gpmodels"][g]["loglik"])]
ax.scatter(bmode, g, s=100, color="b") # BGP mode
ax.scatter(trueBranchingTimes[g] + 0.05, g, s=100, color="k") # True
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the data
Step2: Plot the data
Step3: Run the BGP model
Step4: We can also plot with the predictive uncertainty of the GP.
Step5: Plot posterior
|
7,616
|
<ASSISTANT_TASK:>
Python Code:
%load_ext watermark
%watermark -a 'Sebastian Raschka' -u -d -v -p numpy,pandas,matplotlib,scipy,scikit-learn
# to install watermark just uncomment the following line:
#%install_ext https://raw.githubusercontent.com/rasbt/watermark/master/watermark.py
from IPython.display import Image
from sklearn.datasets import make_blobs
X, y = make_blobs(n_samples=150,
n_features=2,
centers=3,
cluster_std=0.5,
shuffle=True,
random_state=0)
import matplotlib.pyplot as plt
%matplotlib inline
plt.scatter(X[:,0], X[:,1], c='white', marker='o', s=50)
plt.grid()
plt.tight_layout()
#plt.savefig('./figures/spheres.png', dpi=300)
plt.show()
from sklearn.cluster import KMeans
km = KMeans(n_clusters=3,
init='random',
n_init=10,
max_iter=300,
tol=1e-04,
random_state=0)
y_km = km.fit_predict(X)
plt.scatter(X[y_km==0,0],
X[y_km==0,1],
s=50,
c='lightgreen',
marker='s',
label='cluster 1')
plt.scatter(X[y_km==1,0],
X[y_km==1,1],
s=50,
c='orange',
marker='o',
label='cluster 2')
plt.scatter(X[y_km==2,0],
X[y_km==2,1],
s=50,
c='lightblue',
marker='v',
label='cluster 3')
plt.scatter(km.cluster_centers_[:,0],
km.cluster_centers_[:,1],
s=250,
marker='*',
c='red',
label='centroids')
plt.legend()
plt.grid()
plt.tight_layout()
#plt.savefig('./figures/centroids.png', dpi=300)
plt.show()
print('Distortion: %.2f' % km.inertia_)
distortions = []
for i in range(1, 11):
km = KMeans(n_clusters=i,
init='k-means++',
n_init=10,
max_iter=300,
random_state=0)
km.fit(X)
distortions.append(km.inertia_)
plt.plot(range(1,11), distortions , marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.tight_layout()
#plt.savefig('./figures/elbow.png', dpi=300)
plt.show()
import numpy as np
from matplotlib import cm
from sklearn.metrics import silhouette_samples
km = KMeans(n_clusters=3,
init='k-means++',
n_init=10,
max_iter=300,
tol=1e-04,
random_state=0)
y_km = km.fit_predict(X)
cluster_labels = np.unique(y_km)
n_clusters = cluster_labels.shape[0]
silhouette_vals = silhouette_samples(X, y_km, metric='euclidean')
y_ax_lower, y_ax_upper = 0, 0
yticks = []
for i, c in enumerate(cluster_labels):
c_silhouette_vals = silhouette_vals[y_km==c]
c_silhouette_vals.sort()
y_ax_upper += len(c_silhouette_vals)
color = cm.jet(i / n_clusters)
plt.barh(range(y_ax_lower, y_ax_upper), c_silhouette_vals, height=1.0,
edgecolor='none', color=color)
yticks.append((y_ax_lower + y_ax_upper) / 2)
y_ax_lower += len(c_silhouette_vals)
silhouette_avg = np.mean(silhouette_vals)
plt.axvline(silhouette_avg, color="red", linestyle="--")
plt.yticks(yticks, cluster_labels + 1)
plt.ylabel('Cluster')
plt.xlabel('Silhouette coefficient')
plt.tight_layout()
# plt.savefig('./figures/silhouette.png', dpi=300)
plt.show()
km = KMeans(n_clusters=2,
init='k-means++',
n_init=10,
max_iter=300,
tol=1e-04,
random_state=0)
y_km = km.fit_predict(X)
plt.scatter(X[y_km==0,0],
X[y_km==0,1],
s=50,
c='lightgreen',
marker='s',
label='cluster 1')
plt.scatter(X[y_km==1,0],
X[y_km==1,1],
s=50,
c='orange',
marker='o',
label='cluster 2')
plt.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:,1], s=250, marker='*', c='red', label='centroids')
plt.legend()
plt.grid()
plt.tight_layout()
#plt.savefig('./figures/centroids_bad.png', dpi=300)
plt.show()
cluster_labels = np.unique(y_km)
n_clusters = cluster_labels.shape[0]
silhouette_vals = silhouette_samples(X, y_km, metric='euclidean')
y_ax_lower, y_ax_upper = 0, 0
yticks = []
for i, c in enumerate(cluster_labels):
c_silhouette_vals = silhouette_vals[y_km==c]
c_silhouette_vals.sort()
y_ax_upper += len(c_silhouette_vals)
color = cm.jet(i / n_clusters)
plt.barh(range(y_ax_lower, y_ax_upper), c_silhouette_vals, height=1.0,
edgecolor='none', color=color)
yticks.append((y_ax_lower + y_ax_upper) / 2)
y_ax_lower += len(c_silhouette_vals)
silhouette_avg = np.mean(silhouette_vals)
plt.axvline(silhouette_avg, color="red", linestyle="--")
plt.yticks(yticks, cluster_labels + 1)
plt.ylabel('Cluster')
plt.xlabel('Silhouette coefficient')
plt.tight_layout()
# plt.savefig('./figures/silhouette_bad.png', dpi=300)
plt.show()
Image(filename='./images/11_05.png', width=400)
import pandas as pd
import numpy as np
np.random.seed(123)
variables = ['X', 'Y', 'Z']
labels = ['ID_0','ID_1','ID_2','ID_3','ID_4']
X = np.random.random_sample([5,3])*10
df = pd.DataFrame(X, columns=variables, index=labels)
df
from scipy.spatial.distance import pdist,squareform
row_dist = pd.DataFrame(squareform(pdist(df, metric='euclidean')), columns=labels, index=labels)
row_dist
# 1. incorrect approach: Squareform distance matrix
from scipy.cluster.hierarchy import linkage
row_clusters = linkage(row_dist, method='complete', metric='euclidean')
pd.DataFrame(row_clusters,
columns=['row label 1', 'row label 2', 'distance', 'no. of items in clust.'],
index=['cluster %d' %(i+1) for i in range(row_clusters.shape[0])])
# 2. correct approach: Condensed distance matrix
row_clusters = linkage(pdist(df, metric='euclidean'), method='complete')
pd.DataFrame(row_clusters,
columns=['row label 1', 'row label 2', 'distance', 'no. of items in clust.'],
index=['cluster %d' %(i+1) for i in range(row_clusters.shape[0])])
# 3. correct approach: Input sample matrix
row_clusters = linkage(df.values, method='complete', metric='euclidean')
pd.DataFrame(row_clusters,
columns=['row label 1', 'row label 2', 'distance', 'no. of items in clust.'],
index=['cluster %d' %(i+1) for i in range(row_clusters.shape[0])])
from scipy.cluster.hierarchy import dendrogram
# make dendrogram black (part 1/2)
# from scipy.cluster.hierarchy import set_link_color_palette
# set_link_color_palette(['black'])
row_dendr = dendrogram(row_clusters,
labels=labels,
# make dendrogram black (part 2/2)
# color_threshold=np.inf
)
plt.tight_layout()
plt.ylabel('Euclidean distance')
#plt.savefig('./figures/dendrogram.png', dpi=300,
# bbox_inches='tight')
plt.show()
# plot row dendrogram
fig = plt.figure(figsize=(8,8), facecolor='white')
axd = fig.add_axes([0.09,0.1,0.2,0.6])
# note: for matplotlib < v1.5.1, please use orientation='right'
row_dendr = dendrogram(row_clusters, orientation='left')
# reorder data with respect to clustering
# BUG FIX: DataFrame.ix is deprecated (removed in pandas 1.0); the dendrogram
# 'leaves' are positional row indices, so use .iloc.
df_rowclust = df.iloc[row_dendr['leaves'][::-1]]
axd.set_xticks([])
axd.set_yticks([])
# remove axes spines from dendrogram
for i in axd.spines.values():
    i.set_visible(False)
# plot heatmap
axm = fig.add_axes([0.23,0.1,0.6,0.6]) # x-pos, y-pos, width, height
cax = axm.matshow(df_rowclust, interpolation='nearest', cmap='hot_r')
fig.colorbar(cax)
axm.set_xticklabels([''] + list(df_rowclust.columns))
axm.set_yticklabels([''] + list(df_rowclust.index))
# plt.savefig('./figures/heatmap.png', dpi=300)
plt.show()
from sklearn.cluster import AgglomerativeClustering
ac = AgglomerativeClustering(n_clusters=2, affinity='euclidean', linkage='complete')
labels = ac.fit_predict(X)
print('Cluster labels: %s' % labels)
Image(filename='./images/11_11.png', width=500)
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=200, noise=0.05, random_state=0)
plt.scatter(X[:,0], X[:,1])
plt.tight_layout()
#plt.savefig('./figures/moons.png', dpi=300)
plt.show()
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8,3))
km = KMeans(n_clusters=2, random_state=0)
y_km = km.fit_predict(X)
ax1.scatter(X[y_km==0,0], X[y_km==0,1], c='lightblue', marker='o', s=40, label='cluster 1')
ax1.scatter(X[y_km==1,0], X[y_km==1,1], c='red', marker='s', s=40, label='cluster 2')
ax1.set_title('K-means clustering')
ac = AgglomerativeClustering(n_clusters=2, affinity='euclidean', linkage='complete')
y_ac = ac.fit_predict(X)
ax2.scatter(X[y_ac==0,0], X[y_ac==0,1], c='lightblue', marker='o', s=40, label='cluster 1')
ax2.scatter(X[y_ac==1,0], X[y_ac==1,1], c='red', marker='s', s=40, label='cluster 2')
ax2.set_title('Agglomerative clustering')
plt.legend()
plt.tight_layout()
#plt.savefig('./figures/kmeans_and_ac.png', dpi=300)
plt.show()
from sklearn.cluster import DBSCAN
db = DBSCAN(eps=0.2, min_samples=5, metric='euclidean')
y_db = db.fit_predict(X)
plt.scatter(X[y_db==0,0], X[y_db==0,1], c='lightblue', marker='o', s=40, label='cluster 1')
plt.scatter(X[y_db==1,0], X[y_db==1,1], c='red', marker='s', s=40, label='cluster 2')
plt.legend()
plt.tight_layout()
#plt.savefig('./figures/moons_dbscan.png', dpi=300)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <br>
Step2: Grouping objects by similarity using k-means
Step3: <br>
Step4: <br>
Step5: Comparison to "bad" clustering
Step6: <br>
Step7: <br>
Step8: We can either pass a condensed distance matrix (upper triangular) from the pdist function, or we can pass the "original" data array and define the metric='euclidean' argument in linkage. However, we should not pass the squareform distance matrix, which would yield different distance values although the overall clustering could be the same.
Step9: <br>
Step10: <br>
Step11: <br>
Step12: K-means and hierarchical clustering
Step13: Density-based clustering
|
7,617
|
<ASSISTANT_TASK:>
Python Code:
%run dataFormating.ipynb
import sklearn
print (sklearn.__version__)
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LassoCV, Lasso
from sklearn.linear_model import RidgeCV, Ridge
from ipywidgets import FloatProgress
from IPython.display import display
from math import *
from scipy import stats
from scipy.stats.mstats import normaltest
from matplotlib.pyplot import boxplot
# Select columns that correspond to scientific questions
scientificColumns = [x for x in list(defForms.columns.values) if x[0] == "Q"]
# Pick features and target
features = defForms.loc[:, scientificColumns]
target = defForms["temporality"].astype('int')
# Classify using decision trees -accounts for the small size of the dataset and the categorical nature of the features
clf = DecisionTreeClassifier(max_depth=None, min_samples_split=2, random_state=0, max_features="auto")
scores = cross_val_score(clf, features, target)
scores.mean()
# Classify using random forests -accounts for the small size of the dataset and the categorical nature of the features, limit overfitting
clf = RandomForestClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0, bootstrap=True)
scores = cross_val_score(clf, features, target)
scores.mean()
# Classify using extra tree classifiers, more random than random forest methods
clf = ExtraTreesClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0, bootstrap=True)
scores = cross_val_score(clf, features, target)
scores.mean()
# Select columns that correspond to scientific questions
scientificColumns = [x for x in list(defCorrectedForms.columns.values) if x[0] == "Q"]
# Pick features and target
features = defCorrectedForms.loc[:, scientificColumns]
target = defCorrectedForms["temporality"].astype('int')
# Classify using decision trees -accounts for the small size of the dataset and the categorical nature of the features
clf = DecisionTreeClassifier(max_depth=None, min_samples_split=2, random_state=0, max_features="auto")
scores = cross_val_score(clf, features, target)
scores.mean()
# Classify using random forests -accounts for the small size of the dataset and the categorical nature of the features, limit overfitting
clf = RandomForestClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0, bootstrap=True)
scores = cross_val_score(clf, features, target)
scores.mean()
# Classify using extra tree classifiers, more random than random forest methods
clf = ExtraTreesClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0, bootstrap=True)
scores = cross_val_score(clf, features, target)
scores.mean()
def getPosttestUserIds(gfdf):
    """Unique user ids of respondents whose temporality answer marks them
    as posttest takers (answerTemporalities[1])."""
    isPosttest = gfdf[QTemporality] == answerTemporalities[1]
    return gfdf.loc[isPosttest, QUserId].unique()
allDataWebgl1522 = prepareAllData(getAllUserVectorData(
getPosttestUserIds(gfdfWebgl1522UniqueProfiles),
rmdfWebgl1522UniqueProfiles,
gfdfWebgl1522UniqueProfiles,
_source = correctAnswers + demographicAnswers,
_printDebug=False))
allDataWebgl1522Volunteers = prepareAllData(getAllUserVectorData(
getAllResponders(gfdfWebgl1522PretestPosttestUniqueProfilesVolunteers),
rmdfWebgl1522PretestPosttestUniqueProfilesVolunteers,
gfdfWebgl1522PretestPosttestUniqueProfilesVolunteers,
_source = correctAnswers + demographicAnswers,
_printDebug=False))
allDataWebgl160 = prepareAllData(getAllUserVectorData(
getPosttestUserIds(gfdfWebgl160UniqueProfiles),
rmdfWebgl160UniqueProfiles,
gfdfWebgl160UniqueProfiles,
_source = correctAnswers + demographicAnswers,
_printDebug=False))
allDataWebgl160Volunteers = prepareAllData(getAllUserVectorData(
getAllResponders(gfdfWebgl160PretestPosttestUniqueProfilesVolunteers),
rmdfWebgl160PretestPosttestUniqueProfilesVolunteers,
gfdfWebgl160PretestPosttestUniqueProfilesVolunteers,
_source = correctAnswers + demographicAnswers,
_printDebug=False))
allDataPlaytestPhase2 = prepareAllData(getAllUserVectorData(
getAllResponders(gfdfPlaytestPhase2PretestPosttestUniqueProfilesVolunteers),
rmdfPlaytestPhase2PretestPosttestUniqueProfilesVolunteers,
gfdfPlaytestPhase2PretestPosttestUniqueProfilesVolunteers,
_source = correctAnswers + demographicAnswers,
_printDebug=False))
def getAnonymousData(allDataClassif):
    """Return a copy of the data with the identifying column removed."""
    return allDataClassif.drop(columns=["anonymousID"])
# columns to exclude: contain direct information on posttest score
dropPosttestColumns = allDataClassif.columns & (deltaQuestions + posttestQuestions + ["scoreposttest", "scoredelta"])
dropPretestColumns = allDataClassif.columns & (pretestQuestions + ["scorepretest"])
def getUnscaledFeatures(anonymousData, dropPosttest=True, dropPretest=True):
    """Rows with a valid (non-negative) posttest score, optionally minus the
    columns that would leak posttest/pretest information (the module-level
    dropPosttestColumns / dropPretestColumns lists)."""
    validRows = anonymousData["scoreposttest"] >= 0
    result = anonymousData.loc[validRows]
    if dropPosttest:
        result = result.drop(dropPosttestColumns, axis=1)
    if dropPretest:
        result = result.drop(dropPretestColumns, axis=1)
    return result
def getFeaturesTarget(allDataClassif, chosenModel = Lasso):
    """Fit `chosenModel` to predict scoreposttest with 10-fold cross-validation.

    Returns (scores, standardScaler, model, features, target, unscaledFeatures)
    so callers can reuse the fitted scaler and model for later predictions.
    Side effects: draws a boxplot of the CV scores and prints their mean/std.
    """
    # Remove id
    anonymousData = getAnonymousData(allDataClassif)
    # Get features and target
    # Only select rows where scoreafter is not negative
    unscaledFeatures = getUnscaledFeatures(anonymousData)
    target = anonymousData[anonymousData["scoreposttest"] >= 0]["scoreposttest"]
    # Center and scale data
    #features = preprocessing.scale(unscaledFeatures)
    # Center and scale data variant: keep the fitted scaler so new samples can
    # be transformed identically later.
    standardScaler = preprocessing.StandardScaler()
    standardScaler.fit(unscaledFeatures)
    features = standardScaler.transform(unscaledFeatures)
    # Run Lasso regression with cross-validation
    # NOTE(review): cv=10 assumes at least 10 usable rows — confirm for small samples.
    model = chosenModel()
    scores = cross_val_score(model, features, target, cv=10)
    boxplot(scores)
    print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std()))
    model.fit(features, target)
    return scores, standardScaler, model, features, target, unscaledFeatures
scores, standardScaler, model, features, target, unscaledFeatures = getFeaturesTarget(allDataClassif)
def getInvertedCriteria(allData, criteria):
result = allData.copy()
if not (len(result.columns & criteria) == len(criteria)):
print("not all criteria are in input columns")
for criterion in criteria:
result[criterion] = 1 / (1 + result[criterion])
return result
#allDataClassifInv = getAllDataClassif(getInvertedCriteria(allData, totalTimesCriteria + completionTimesCriteria))
allDataClassifInv = getAllDataClassif(getInvertedCriteria(allData, totalTimesCriteria + completionTimesCriteria))
scoresInv, standardScalerInv, modelInv, featuresInv, targetInv, unscaledFeaturesInv = getFeaturesTarget(allDataClassifInv)
#list(set(allDataClassifInv.columns) - set(['anonymousID']))
#criteria = list(\
# set(adc.columns)\
# - set(adc.columns & \
# (deltaQuestions + posttestQuestions
# + pretestQuestions
# + ["scoreposttest", "scoredelta", 'scoreundefined', "anonymousID"]
# + ["scorepretest"]
# + predefinedCriteria
# ))\
#)
getScoresMean(allDataClassifInv, list(set(allDataClassifInv.columns)
- set(
deltaQuestions
+ posttestQuestions
+ pretestQuestions
+ ["scorepretest", "scoreposttest", "scoredelta", 'scoreundefined', 'anonymousID']
)))
def getPrediction(standardScaler, model, unscaledX):
    """Scale a single raw feature vector with the fitted scaler and return
    the model's prediction for it."""
    scaled = standardScaler.transform([unscaledX])
    predictions = model.predict(scaled)
    return predictions[0]
def getPredictionVsActual(standardScaler, model, allDataClassif):
    """Compare per-user predicted vs actual posttest scores.

    Returns (result, r2Coef): `result` is a DataFrame indexed by user with
    'predicted', 'actual' and 'error' (= predicted - actual) columns;
    `r2Coef` is the model's R^2 on this sample.
    """
    unscaledFeatures = getUnscaledFeatures(getAnonymousData(allDataClassif))
    # Pre-filled with -1 so any partially-filled row is recognisable.
    result = pd.DataFrame(index = unscaledFeatures.index, columns=["predicted", "actual", "error"], data = -1)
    for userId in unscaledFeatures.index:
        unscaledX = unscaledFeatures.loc[userId].values
        actualScore = allDataClassif.loc[userId, "scoreposttest"]
        result.loc[userId, "predicted"] = getPrediction(standardScaler, model, unscaledX)
        result.loc[userId, "actual"] = actualScore
        result.loc[userId, "error"] = result.loc[userId, "predicted"] - result.loc[userId, "actual"]
    # Score against the collected actuals (same rows, same order as features).
    r2Coef = model.score(standardScaler.transform(unscaledFeatures), result["actual"].values)
    return result, r2Coef
samples = [allDataWebgl1522,
allDataWebgl1522Volunteers,
allDataWebgl160,
allDataWebgl160Volunteers,
allDataPlaytestPhase2
]
for sample in samples:
_allDataClassif = getAllDataClassif(sample)
result, r2Coef = getPredictionVsActual(standardScaler, model, _allDataClassif)
print("{0:0=2d}".format(len(_allDataClassif)) + ": " + str(r2Coef))
_allDataClassifInv = getAllDataClassif(getInvertedCriteria(sample, totalTimesCriteria + completionTimesCriteria))
resultInv, r2CoefInv = getPredictionVsActual(standardScalerInv, modelInv, _allDataClassifInv)
print("{0:0=2d}".format(len(_allDataClassifInv)) + " inv: " + str(r2CoefInv))
def getLassoModelCoefficients(model, unscaledFeatures, useAbs = True):
    """Return the model's non-zero coefficients as a Series indexed by the
    matching feature-column names, sorted ascending; magnitudes when
    `useAbs` is True."""
    selected = np.nonzero(model.coef_)
    values = model.coef_[selected]
    if useAbs:
        values = abs(values)
    return pd.Series(data=values,
                     index=unscaledFeatures.columns[selected]).sort_values()
getLassoModelCoefficients(model, unscaledFeatures)
getLassoModelCoefficients(modelInv, unscaledFeatures)
#unscaledFeatures = getUnscaledFeatures(getAnonymousData(allDataClassifWebgl160Volunteers))
#unscaledX = unscaledFeatures.iloc[0].values
#X = standardScaler.transform([unscaledX])
#model.predict(X)[0]
#X = (unscaledX - standardScaler.mean_) / standardScaler.scale_
#model.predict([X])[0]
#np.dot(model.coef_, X) + model.intercept_
if False:
anonymousData = getAnonymousData(allDataClassif)
sortedUnscaledFeatures = anonymousData[anonymousData["scoreposttest"] >= 0].sort_values(by="scoreposttest").drop(dropPosttestColumns, axis = 1)
sortedTarget = sorted(anonymousData[anonymousData["scoreposttest"] >= 0]["scoreposttest"])
# Center and scale data variant
sortedFeatures = standardScaler.transform(sortedUnscaledFeatures)
x = range(len(sortedFeatures))
alpha = 0.5
fig, ax = plt.subplots()
plt.title('Actual vs predicted score')
plt.xlabel('User index')
plt.ylabel('Score')
#plt.plot(x, model.predict(sortedFeatures), kind = 'bar')
#plt.plot(x, sortedTarget)
ax.bar(x, model.predict(sortedFeatures), alpha=alpha, label='predicted', linewidth=0)
ax.bar(x, sortedTarget, alpha=alpha, label='actual')
ax.legend()
fig.tight_layout()
plt.show()
def getFeaturesTargetSecondDegreePolynomial(allDataClassif, chosenModel = Lasso):
    """Same pipeline as getFeaturesTarget, but expands the features with all
    degree-2 polynomial terms before scaling.

    Returns (scores, standardScaler, model, features, target,
    unscaledFeatures); side effects: boxplot of CV scores + printed mean/std.
    """
    # Remove id
    anonymousData = getAnonymousData(allDataClassif)
    # Get features and target
    # Only select rows where scoreafter is not negative
    unscaledFeatures = getUnscaledFeatures(anonymousData)
    target = anonymousData[anonymousData["scoreposttest"] >= 0]["scoreposttest"]
    # Add polynomial features
    # NOTE(review): include_bias=True adds a constant column that StandardScaler
    # then reduces to zeros — presumably harmless for Lasso, but confirm intended.
    secondDegreeFeatures = preprocessing.PolynomialFeatures(degree=2, interaction_only=False, include_bias=True)
    unscaledFeatures = secondDegreeFeatures.fit_transform(unscaledFeatures)
    # Center and scale data variant: keep the fitted scaler for later reuse.
    standardScaler = preprocessing.StandardScaler()
    standardScaler.fit(unscaledFeatures)
    features = standardScaler.transform(unscaledFeatures)
    # Run Lasso regression with cross-validation
    model = chosenModel()
    scores = cross_val_score(model, features, target, cv=10)
    boxplot(scores)
    print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std()))
    model.fit(features, target)
    return scores, standardScaler, model, features, target, unscaledFeatures
scores1, standardScaler1, model1, features1, target1, unscaledFeatures1 = getFeaturesTargetSecondDegreePolynomial(allDataClassif);
scores2, standardScaler2, model2, features2, target2, unscaledFeatures2 = getFeaturesTargetSecondDegreePolynomial(allDataClassifInv);
# Remove id
anonymousData = getAnonymousData(allDataClassifInv)
# Get features and target.
# Only select rows where the post-test score is not negative (negative = missing).
unscaledFeatures = anonymousData[anonymousData["scoreposttest"] >= 0]
# Previously tried feature sets, kept for reference:
#unscaledFeatures = unscaledFeatures[["craft", "death", "add", "remove", "reach", "maxChapter"] + totalTimesCriteria + completionTimesCriteria]
#unscaledFeatures = unscaledFeatures[["craft", "death", "add", "remove", "reach", "maxChapter"]]
#unscaledFeatures = unscaledFeatures[totalTimesCriteria]
#unscaledFeatures = unscaledFeatures[completionTimesCriteria]
#unscaledFeatures = unscaledFeatures[["maxChapter", "ch05completion", "ch07completion", "ch07total", "ch09total"]]
#unscaledFeatures = unscaledFeatures[['pretest Enjoyed playing', 'scorepretest', 'pretest Want to learn more about Biology', 'ch05total', 'ch07total']]
#unscaledFeatures = unscaledFeatures[['ch05completion', 'ch08total', 'ch06total', 'scorepretest', 'pretest Want to learn more about Biology', 'ch05total', 'ch07total']]
if False:#'columnsForRegression' in globals():
    # Use the best column subset found by the exhaustive search further below.
    unscaledFeatures = unscaledFeatures[columnsForRegression]
else:
#    unscaledFeatures = unscaledFeatures[['ch05completion', 'ch08total', 'ch06total', 'scorepretest', 'pretest Want to learn more about Biology', 'ch05total', 'ch07total']]
#    unscaledFeatures = unscaledFeatures[['pretest Enjoyed playing', 'scorepretest', 'pretest Want to learn more about Biology', 'ch05total', 'ch07total']]
    # Hand-picked RedMetrics-only criteria.
    unscaledFeatures = unscaledFeatures[['ch02completion', 'ch05completion', 'ch05total', 'ch07total', 'ch08total']]
target = anonymousData[anonymousData["scoreposttest"] >= 0]["scoreposttest"]
# NOTE(review): a second-degree PolynomialFeatures expansion was computed here
# and then immediately discarded by scaling the raw features instead; the dead
# computation has been removed. Confirm whether the polynomial features were
# meant to be used (getLassoModelCoefficients below pairs coefficients with the
# raw columns, which suggests they were not).
# Center and scale data
features = preprocessing.scale(unscaledFeatures)
# Run Lasso regression with cross-validation
model = Lasso()
scores = cross_val_score(model, features, target, cv=10)
boxplot(scores)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std()))
model.fit(features, target)
getLassoModelCoefficients(model, unscaledFeatures)
def getScoresMean(allDataClassif, columnsSubset):
    """Mean 10-fold cross-validated Lasso score for predicting the post-test score.

    Parameters
    ----------
    allDataClassif : DataFrame with "anonymousID", "scoreposttest" and feature columns.
    columnsSubset : list of column names to use as predictors.

    Returns
    -------
    float : mean of the 10 cross-validation scores.
    """
    anonymousData = getAnonymousData(allDataClassif)
    # Keep only rows with a valid (non-negative) post-test score.
    unscaledFeatures = anonymousData[anonymousData["scoreposttest"] >= 0]
    unscaledFeatures = unscaledFeatures[columnsSubset]
    target = anonymousData[anonymousData["scoreposttest"] >= 0]["scoreposttest"]
    # NOTE(review): the original built second-degree polynomial features here and
    # then immediately discarded them by scaling the raw features instead; the
    # dead computation has been removed. Confirm whether the polynomial
    # expansion was intended to be used.
    features = preprocessing.scale(unscaledFeatures)
    model = Lasso()
    scores = cross_val_score(model, features, target, cv=10)
    return scores.mean()
# number of possibles subsets of size n of a set of size 96
import scipy.special
scipy.special.binom(96, 3),\
scipy.special.binom(96, 4),\
scipy.special.binom(96, 5),\
scipy.special.binom(96, 6),\
scipy.special.binom(96, 7),\
scipy.special.binom(96, 8)
from IPython.display import HTML
HTML('''<script>
code_show_err=false;
function code_toggle_err() {
if (code_show_err){
$('div.output_stderr').hide();
} else {
$('div.output_stderr').show();
}
code_show_err = !code_show_err
}
$( document ).ready(code_toggle_err);
</script>
To toggle on/off output_stderr, click <a href="javascript:code_toggle_err()">here</a>.''')
def getETA(computations, timestamp):
    """Estimate when `computations` operations will finish, starting at `timestamp`.

    Throughput was measured empirically: 2794155 computations in 42338 seconds.
    Returns a pandas Timestamp.
    """
    throughput = 2794155 / 42338  # computations per second
    seconds_needed = computations / throughput
    return timestamp + pd.Timedelta(seconds=seconds_needed)
import itertools
import time
import scipy.special
import warnings
from ipywidgets import Textarea, FloatText, ToggleButton, Checkbox
warnings.filterwarnings('ignore')
#adc = allDataClassif.copy()
adc = allDataClassifInv.copy()
# criteria with pretest info
#predefinedCriteria = ['ch05completion', 'scorepretest', 'pretest Want to learn more about Biology', 'ch07total', 'ch05total',]
# criteria with only RM info
#predefinedCriteria = ['ch02completion', 'ch05completion', 'ch05total', 'ch07total', 'ch08total',]
#predefinedCriteria = ['ch02completion', 'ch05completion', 'ch05total', 'ch07total', 'ch08total',]
predefinedCriteria = ['ch06completion', 'ch02completion', 'ch07total', 'ch05total', ]
criteria = list(\
set(adc.columns)\
- set(adc.columns & \
(deltaQuestions + posttestQuestions
+ pretestQuestions
+ ["scoreposttest", "scoredelta", 'scoreundefined', "anonymousID"]
+ ["scorepretest"]
+ predefinedCriteria
))\
)
subsetSize = 4
combinations = scipy.special.binom(len(criteria), subsetSize)
print("#combinations="+str(combinations))
print("ETA " + str(getETA(combinations, pd.Timestamp.now())))
if True:
# very long computation time: > 10h
maxScore = 0.36
i = 0
columnsForRegression = []
iterations = combinations+2
_progress = IntProgress(min=0, max=iterations)
_intText = IntText(0)
_currentBest = FloatText(0.0)
_currentCriteria = Textarea("")
#_stopButton = ToggleButton(value=False, description='Stop')
#_stopCheckbox = Checkbox(value=False, description='Stop')
display(_progress)
display(_intText)
display(_currentBest)
display(_currentCriteria)
#display(_stopButton)
#display(_stopCheckbox)
iterator = itertools.combinations(criteria, subsetSize)
start_time = time.time()
for columnsSubset in iterator:
#if _stopButton.value or _stopCheckbox.value or (i >= iterations):
if (i >= iterations):
break
else:
i += 1
_progress.value += 1
_intText.value+= 1
score = getScoresMean(adc, list(columnsSubset) + predefinedCriteria)
if score > maxScore:
maxScore = score
_currentBest.value = score
columnsForRegression = list(columnsSubset) + predefinedCriteria
_currentCriteria.value = str(columnsForRegression)
print("--- executed %s / %s in %s seconds ---" % (i, combinations, time.time() - start_time))
print("--- end time: " + str(pd.Timestamp.now()))
maxScore, columnsForRegression
# how long to compute all
(17 * 61124064 / 1000) / 3600,\
(249 * 57940519 / 15000) / 3600,\
(204 * 57940519 / 15000) / 3600,\
# how much computed in some duration
durationSeconds = 5 * 60
durationSeconds * 1000 / 17
# Remove id
anonymousData = gameAndCorrectedAfterDataClassif.drop("anonymousID", axis = 1)
# Get features and target
# Only select rows where scoreafter is not negative
features = anonymousData[anonymousData["scoreposttest"] >= 0]
features = features.loc[:,"sessionsCount":"completionTime"]
target = anonymousData[anonymousData["scoreposttest"] >= 0]["biologyStudy"]
# Add polynomial features
secondDegreeFeatures = preprocessing.PolynomialFeatures(degree=2, interaction_only=False, include_bias=True)
features = secondDegreeFeatures.fit_transform(features)
# Center and scale data
features = preprocessing.scale(features)
# Run Lasso regression with cross-validation
model = Lasso()
scores = cross_val_score(model, features, target, cv=10)
boxplot(scores)
scores
# Remove id
anonymousData = gameAndCorrectedAfterDataClassif.drop("anonymousID", axis = 1)
# Get features and target
features = anonymousData.loc[:,"sessionsCount":"completionTime"]
# Bug fix: the original used sum(seriesA, seriesB), i.e. Python's built-in sum
# with the second Series as the *start* value, which adds the grand total of
# gameInterest to every gameFrequency entry. The intended target is the
# element-wise sum of the two questionnaire answers.
target = anonymousData["gameInterest"] + anonymousData["gameFrequency"]
# Add second-degree polynomial features
secondDegreeFeatures = preprocessing.PolynomialFeatures(degree=2, interaction_only=False, include_bias=True)
features = secondDegreeFeatures.fit_transform(features)
# Center and scale data
features = preprocessing.scale(features)
# Run Lasso regression with cross-validation
model = Lasso()
scores = cross_val_score(model, features, target, cv=10)
boxplot(scores)
scores
# Given a question tag, plot scores of cross-validated model
def tryClassification(data, scientificQuestion):
    """Cross-validate an ExtraTrees classifier predicting one question's answer.

    Parameters
    ----------
    data : DataFrame containing "anonymousID", "scoreposttest", the module-level
        `criteria` feature columns and the per-question answer columns.
    scientificQuestion : str, name of the (boolean-like) answer column to predict.

    Returns
    -------
    list : [mean, std] of the 5-fold cross-validation accuracies.

    Side effects: opens a new matplotlib figure and draws a boxplot of the scores.
    """
    # Remove id
    anonymousData = data.drop("anonymousID", axis = 1)
    # Get features and target
    # Only select rows where the post-test score is not negative (negative = missing)
    features = anonymousData[anonymousData["scoreposttest"] >= 0]
    #features = features.iloc[:,24:37]
    # `criteria` is a module-level list of feature column names.
    features = features.loc[:,criteria]
    target = anonymousData[anonymousData["scoreposttest"] >= 0].loc[:,scientificQuestion].astype('int')
    # Add second-degree polynomial features
    secondDegreeFeatures = preprocessing.PolynomialFeatures(degree=2, interaction_only=False, include_bias=True)
    features = secondDegreeFeatures.fit_transform(features)
    # Center and scale data
    features = preprocessing.scale(features)
    # Classify using extra-trees classifiers, more randomized than random forest methods
    clf = ExtraTreesClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0, bootstrap=True)
    scores = cross_val_score(clf, features, target, cv=5)
    # Display plot -- a fresh figure so the boxplot does not draw over a
    # previous one (boxplot presumably draws on the current axes; confirm)
    fig, ax = plt.subplots()
    boxplot(scores)
    return [scores.mean(), scores.std()]
scientificQuestionsDescrs = correctAnswers[correctAnswers.apply(len) != 0].index.values.tolist()
#scientificQuestionsDescrs
anonymousData.columns[24:37]
# Bug fix: `set(x) in set(y)` asks whether the set x is an *element* of set y,
# which raises TypeError (sets are unhashable); testing column coverage needs
# the subset operator `<=` (issubset).
set(criteria) <= set(anonymousData.columns)#[24:37]
#[c for c in criteria if c not in anonymousData.columns]
#[c for c in anonymousData.columns if c not in criteria]
#anonymousData[criteria]
allScores = pd.DataFrame(index = ["Mean", "Var"])
for question in scientificQuestions:# ["QGenotypePhenotype", "QBioBricksDevicesComposition", "QAmpicillin", "QBBNamePlasmid", "QBBFunctionTER", "QBBNamePromoter", "QBBFunctionGameCDS", "QBBNameTerminator", "QBBFunctionBiologyCDS", "QBBNameRBS", "QBBExampleCDS", "QBBNameCDS", "QBBFunctionPR", "QBBFunctionRBS", "QBBFunctionPlasmid", "QBBNameOperator", "QDeviceRbsPconsFlhdcTer", "QDevicePconsRbsFlhdcTer", "QDevicePbadRbsGfpTer", "QDevicePbadGfpRbsTer", "QDeviceGfpRbsPconsTer", "QDevicePconsGfpRbsTer", "QDeviceAmprRbsPconsTer", "QDeviceRbsPconsAmprTer", "QGreenFluorescence", "QUnequipDevice", "QDevicePbadRbsAraTer"]:
questionTag = question
scores = tryClassification(gameAndCorrectedAfterDataClassif, questionTag)
allScores[questionTag] = scores
allScores.columns = scientificQuestionsDescrs
allScores.T
#from scipy import stats
stats.describe(allScores.loc['Mean',:])
def getBoxplot(scores, title = ''):
    """Draw a boxplot of `scores` in a brand-new figure, optionally titled."""
    axes = plt.figure().add_subplot(111)
    axes.boxplot(scores)
    axes.set_title(title)
#pd.concat([anonymousData.loc[:,"sessionsCount":"completionTime"], anonymousData.loc[:,"gameInterest":"previousPlay"]], axis=1).columns
#anonymousData.columns.values
ingameCriteria = ['sessionsCount', 'scoreposttest', 'scoreundefined', 'complete',
'configure', 'craft', 'death', 'equip', 'unequip', 'add', 'remove',
'gotourl', 'pickup', 'reach', 'restart', 'selectmenu', 'start',
'scoredelta', 'maxChapter', 'efficiency', 'thoroughness', 'fun',
'completionTime', 'ch00completion', 'ch01completion',
'ch02completion', 'ch03completion', 'ch04completion',
'ch05completion', 'ch06completion', 'ch07completion',
'ch08completion', 'ch09completion', 'ch10completion',
'ch11completion', 'ch12completion', 'ch13completion',
'ch14completion', 'ch00total', 'ch01total', 'ch02total',
'ch03total', 'ch04total', 'ch05total', 'ch06total', 'ch07total',
'ch08total', 'ch09total', 'ch10total', 'ch11total', 'ch12total',
'ch13total', 'ch14total', 'totalTime']
# boxplot function
# questions: array of strings of question names
def getPerformanceFromQuestionGroup(questions,
                                    thresholdPercentage = 1.0,
                                    extraTreesClassifier = False,
                                    randomForestClassifier = False,
                                    lasso = False,
                                    histTarget = 0
                                   ):
    """Evaluate how well in-game features predict performance on a question group.

    Parameters
    ----------
    questions : list of question column names (boolean-like answer columns).
    thresholdPercentage : float in [0, 1]; a player is labelled 1 when their
        number of correct answers reaches thresholdPercentage * len(questions).
    extraTreesClassifier, randomForestClassifier : bool; run the corresponding
        cross-validated classifier on the binary label and plot its scores.
    lasso : bool; run a cross-validated Lasso regression on the raw count.
    histTarget : int; if > 0, plot a histogram of the raw correct-answer counts
        with bins range(histTarget).

    Side effects: prints mean scores and opens one figure per requested plot.
    """
    # Remove id
    anonymousData = gameAndCorrectedAfterDataClassif.drop("anonymousID", axis = 1)
    # Features: in-game (RedMetrics) criteria only.
    features = anonymousData.loc[:,ingameCriteria]
    # Raw target: number of correctly answered questions in the group.
    digitalTarget = anonymousData.loc[:, questions].astype(int).sum(axis=1)
    # Binary target: 1 when the player reaches the threshold, else 0.
    categoricalTarget = digitalTarget.apply(lambda x: 0 if x < thresholdPercentage*len(questions) else 1)
    # Add second-degree polynomial features
    secondDegreeFeatures = preprocessing.PolynomialFeatures(degree=2, interaction_only=False, include_bias=True)
    features = secondDegreeFeatures.fit_transform(features)
    # Center and scale data
    features = preprocessing.scale(features)
    if extraTreesClassifier:
        # Extra-trees classifiers: more randomized than random forest methods.
        clf = ExtraTreesClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0, bootstrap=True)
        scores = cross_val_score(clf, features, categoricalTarget, cv=10)
        print("ExtraTreesClassifier scores mean: " + str(scores.mean()))
        # Display plot
        getBoxplot(scores, "ExtraTreesClassifier boxplot")
    if randomForestClassifier:
        # Random forests: suit the small, categorical dataset; bootstrap limits overfitting.
        clf = RandomForestClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0, bootstrap=True)
        scores = cross_val_score(clf, features, categoricalTarget)
        print("RandomForestClassifier scores mean: " + str(scores.mean()))
        # Display plot
        getBoxplot(scores, "RandomForestClassifier boxplot")
    if lasso:
        # Lasso regression on the raw correct-answer count, with cross-validation.
        model = Lasso()
        scores = cross_val_score(model, features, digitalTarget, cv=10)
        print("Lasso scores mean: " + str(scores.mean()))
        # Display plot
        getBoxplot(scores, "Lasso boxplot")
    if histTarget > 0:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        # Bug fix: `target` was undefined inside this function (it silently read
        # a notebook global of that name); the intended variable is the raw
        # correct-answer count.
        ax.hist(digitalTarget, bins = range(histTarget))
hardQuestions = ["QBBFunctionPR", "QBBNameOperator", "QDevicePbadRbsAraTer"]
getPerformanceFromQuestionGroup(hardQuestions, thresholdPercentage = 0.5, extraTreesClassifier = True, randomForestClassifier = True, lasso = True)
bbSymbolRecognition = ["QBBNamePlasmid", "QBBFunctionTER", "QBBNamePromoter", "QBBFunctionGameCDS", "QBBNameTerminator", "QBBFunctionBiologyCDS", "QBBNameRBS", "QBBExampleCDS", "QBBNameCDS", "QBBFunctionPR", "QBBFunctionRBS", "QBBFunctionPlasmid", "QBBNameOperator"]
getPerformanceFromQuestionGroup(bbSymbolRecognition, thresholdPercentage = 0.6, extraTreesClassifier = True, randomForestClassifier = True, lasso = True)
easyQuestions = ["QBioBricksDevicesComposition", "QDeviceRbsPconsFlhdcTer", "QGreenFluorescence"]
getPerformanceFromQuestionGroup(easyQuestions, thresholdPercentage = 1.0, extraTreesClassifier = True, randomForestClassifier = True, lasso = True, histTarget = 14)
knowledgeQuestions = ["QAmpicillin",
"QBBNamePlasmid",
"QBBNamePromoter",
"QBBNameTerminator",
"QBBNameRBS",
"QBBNameCDS",
"QBBNameOperator",
]
getPerformanceFromQuestionGroup(knowledgeQuestions, thresholdPercentage = 0.7, extraTreesClassifier = True, randomForestClassifier = True, lasso = True, histTarget = 14)
comprehensionQuestions = ["QBioBricksDevicesComposition",
"QBBFunctionTER",
"QBBFunctionPlasmid",
"QUnequipDevice",
]
getPerformanceFromQuestionGroup(comprehensionQuestions, thresholdPercentage = 1.0, extraTreesClassifier = True, randomForestClassifier = True, lasso = True, histTarget = 14)
applicationQuestions = ["QGenotypePhenotype",
"QBBExampleCDS",
"QGreenFluorescence",
]
getPerformanceFromQuestionGroup(applicationQuestions, thresholdPercentage = 1.0, extraTreesClassifier = True, randomForestClassifier = True, lasso = True, histTarget = 14)
analysisQuestions = ["QBBFunctionGameCDS",
"QBBFunctionBiologyCDS",
"QBBFunctionPR",
"QBBFunctionRBS",
"QDevicePbadRbsAraTer",
]
getPerformanceFromQuestionGroup(analysisQuestions, thresholdPercentage = 0.7, extraTreesClassifier = True, randomForestClassifier = True, lasso = True, histTarget = 14)
synthesisQuestions = ["QDeviceRbsPconsFlhdcTer",
"QDevicePconsRbsFlhdcTer",
"QDevicePbadRbsGfpTer",
"QDevicePbadGfpRbsTer",
"QDeviceGfpRbsPconsTer",
"QDevicePconsGfpRbsTer",
"QDeviceAmprRbsPconsTer",
"QDeviceRbsPconsAmprTer",
]
getPerformanceFromQuestionGroup(synthesisQuestions, thresholdPercentage = 1.0, extraTreesClassifier = True, randomForestClassifier = True, lasso = True, histTarget = 14)
# Remove id
anonymousData = gameAndCorrectedBeforeDataClassif.drop("anonymousID", axis = 1)
# Get features and target
lastColumn = 'gender_Male'
for potentialLastColumn in ['gender_Other', 'gender_Prefer not to say']:
if potentialLastColumn in anonymousData.columns:
lastColumn = potentialLastColumn
features = anonymousData.loc[:,"gameInterest":lastColumn]
target = anonymousData.loc[:,"completionTime"]
# Add polynomial features
secondDegreeFeatures = preprocessing.PolynomialFeatures(degree=2, interaction_only=False, include_bias=True)
features = secondDegreeFeatures.fit_transform(features)
# Center and scale data
features = preprocessing.scale(features)
# Run Lasso regression with cross-validation
model = Lasso(max_iter=10000, alpha=10)
scores = cross_val_score(model, features, target, cv=10)
boxplot(scores)
scores.mean()
# Try classification
target = target.apply(lambda x: 0 if x < 7200 else 1) #0 if short, 1 if long
# Classify using extra tree classifiers, more random than random forest methods
clf = ExtraTreesClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0, bootstrap=True)
scores = cross_val_score(clf, features, target, cv=10)
# Display plot
boxplot(scores)
scores.mean()
sum(target)/len(target)
# Remove id
anonymousData = gameAndCorrectedAfterDataClassif.drop("anonymousID", axis = 1)
# Get features and target
lastColumn = 'gender_Male'
for potentialLastColumn in ['gender_Other', 'gender_Prefer not to say']:
if potentialLastColumn in anonymousData.columns:
lastColumn = potentialLastColumn
features = anonymousData.loc[:,"gameInterest":lastColumn]
target = anonymousData.loc[:,"completionTime"]
# Add polynomial features
secondDegreeFeatures = preprocessing.PolynomialFeatures(degree=2, interaction_only=False, include_bias=True)
features = secondDegreeFeatures.fit_transform(features)
# Center and scale data
features = preprocessing.scale(features)
# Run Lasso regression with cross-validation
model = Lasso(max_iter=1000000)
scores = cross_val_score(model, features, target, cv=10)
boxplot(scores)
scores.mean()
# Try classification
target = target.apply(lambda x: 0 if x < 7200 else 1) #0 if short, 1 if long
# Classify using extra tree classifiers, more random than random forest methods
clf = ExtraTreesClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0, bootstrap=True)
scores = cross_val_score(clf, features, target, cv=10)
# Display plot
boxplot(scores)
scores.mean()
sum(target)
len(target)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Questionnaire only
Step2: Conclusion
Step3: Conclusion
Step4: Can the score of a player be predicted with their RedMetrics data?
Step5: Prediction of a single score
Step6: use allData from online campaigns
Step7: Determining the most important variables
Step8: Prediction of all scores
Step9: Conclusion
Step10: Conclusion
Step11: allDataClassif
Step12: Conclusion
Step13: Conclusion
Step14: Conclusion
Step15: Can the game data be used to predict the performance on a sub-group of scientific questions?
Step16: Using an arbitrary classification of questions
Step17: Conclusion
Step18: Conclusion
Step19: Conclusion
Step20: comprehension questions
Step21: application questions
Step22: analysis questions
Step23: synthesis questions
Step24: Can the completion time be predicted from questionnaire answers?
Step25: Conclusion
Step26: Conclusion
|
7,618
|
<ASSISTANT_TASK:>
Python Code:
numbers_str = '496,258,332,550,506,699,7,985,171,581,436,804,736,528,65,855,68,279,721,120'
numbers = [int(i) for i in numbers_str.split(",")]
max(numbers)
sorted(numbers)[-10:]
sorted([number for number in numbers if number%3 == 0])
from math import sqrt
[sqrt(number) for number in numbers if number < 100]
planets = [
{'diameter': 0.382,
'mass': 0.06,
'moons': 0,
'name': 'Mercury',
'orbital_period': 0.24,
'rings': 'no',
'type': 'terrestrial'},
{'diameter': 0.949,
'mass': 0.82,
'moons': 0,
'name': 'Venus',
'orbital_period': 0.62,
'rings': 'no',
'type': 'terrestrial'},
{'diameter': 1.00,
'mass': 1.00,
'moons': 1,
'name': 'Earth',
'orbital_period': 1.00,
'rings': 'no',
'type': 'terrestrial'},
{'diameter': 0.532,
'mass': 0.11,
'moons': 2,
'name': 'Mars',
'orbital_period': 1.88,
'rings': 'no',
'type': 'terrestrial'},
{'diameter': 11.209,
'mass': 317.8,
'moons': 67,
'name': 'Jupiter',
'orbital_period': 11.86,
'rings': 'yes',
'type': 'gas giant'},
{'diameter': 9.449,
'mass': 95.2,
'moons': 62,
'name': 'Saturn',
'orbital_period': 29.46,
'rings': 'yes',
'type': 'gas giant'},
{'diameter': 4.007,
'mass': 14.6,
'moons': 27,
'name': 'Uranus',
'orbital_period': 84.01,
'rings': 'yes',
'type': 'ice giant'},
{'diameter': 3.883,
'mass': 17.2,
'moons': 14,
'name': 'Neptune',
'orbital_period': 164.8,
'rings': 'yes',
'type': 'ice giant'}]
[planet['name'] for planet in planets if planet['diameter']/2 > (planets[2]['diameter'] / 2 * 4)]
sum([planet['mass'] for planet in planets])
[planet['name'] for planet in planets if 'giant' in planet['type']]
[planet['name'] for planet in sorted(planets, key = lambda planet: planet['moons'])]
# Useful reads:
# http://stackoverflow.com/questions/8966538/syntax-behind-sortedkey-lambda
# https://docs.python.org/3.5/howto/sorting.html#sortinghowto
import re
poem_lines = ['Two roads diverged in a yellow wood,',
'And sorry I could not travel both',
'And be one traveler, long I stood',
'And looked down one as far as I could',
'To where it bent in the undergrowth;',
'',
'Then took the other, as just as fair,',
'And having perhaps the better claim,',
'Because it was grassy and wanted wear;',
'Though as for that the passing there',
'Had worn them really about the same,',
'',
'And both that morning equally lay',
'In leaves no step had trodden black.',
'Oh, I kept the first for another day!',
'Yet knowing how way leads on to way,',
'I doubted if I should ever come back.',
'',
'I shall be telling this with a sigh',
'Somewhere ages and ages hence:',
'Two roads diverged in a wood, and I---',
'I took the one less travelled by,',
'And that has made all the difference.']
[line for line in poem_lines if re.search(r"\b\w{4}\b\s\b\w{4}\b", line)]
#character class that matches non-alphanumeric characters = \W
#in analogy to \s and \S
[line for line in poem_lines if re.search(r"\b\w{5}(?:$|\W$)", line)]
all_lines = " ".join(poem_lines)
[line[2:] for line in re.findall(r"\bI\b\s\b\w{1,}\b", all_lines)]
entrees = [
    "Yam, Rosemary and Chicken Bowl with Hot Sauce $10.95",
    "Lavender and Pepperoni Sandwich $8.49",
    "Water Chestnuts and Peas Power Lunch (with mayonnaise) $12.95 - v",
    "Artichoke, Mustard Green and Arugula with Sesame Oil over noodles $9.95 - v",
    "Flank Steak with Lentils And Tabasco Pepper With Sweet Chilli Sauce $19.95",
    "Rutabaga And Cucumber Wrap $8.49 - v"
]
# Test cell to try code bits: split each entree at the dollar sign.
[item.split("$") for item in entrees if re.search(r"(?:\d\d|\d).\d\d", item)]
# TA-Stephan: Careful - price should be numeric. Fixed: parsed as float below
# (prices carry cents, so float rather than int).
menu = []
for item in entrees:
    dish = {}
    # Dish name: everything before the final " $<price>" marker.
    dish['name'] = re.search(r"(.*)\s\$", item).group(1)
    # Price as a number instead of a string.
    dish['price'] = float(re.search(r"\d{1,2}\.\d{2}", item).group())
    # A " - v" suffix marks the dish as vegetarian.
    dish['vegetarian'] = bool(re.search(r"\s-\sv", item))
    menu.append(dish)
menu
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the following cell, complete the code with an expression that evaluates to a list of integers derived from the raw numbers in numbers_str, assigning the value of this expression to a variable numbers. If you do everything correctly, executing the cell should produce the output 985 (not '985').
Step2: Great! We'll be using the numbers list you created above in the next few problems.
Step3: In the cell below, write an expression that evaluates to a list of the integers from numbers that are evenly divisible by three, sorted in numerical order. Expected output
Step4: Okay. You're doing great. Now, in the cell below, write an expression that evaluates to a list of the square roots of all the integers in numbers that are less than 100. In order to do this, you'll need to use the sqrt function from the math module, which I've already imported for you. Expected output
Step5: Problem set #2
Step6: Now, in the cell below, write a list comprehension that evaluates to a list of names of the planets that have a radius greater than four earth radii. Expected output
Step7: In the cell below, write a single expression that evaluates to the sum of the mass of all planets in the solar system. Expected output
Step8: Good work. Last one with the planets. Write an expression that evaluates to the names of the planets that have the word giant anywhere in the value for their type key. Expected output
Step9: EXTREME BONUS ROUND
Step10: Problem set #3
Step11: In the cell above, I defined a variable poem_lines which has a list of lines in the poem, and imported the re library.
Step12: Good! Now, in the following cell, write a list comprehension that evaluates to a list of lines in the poem that end with a five-letter word, regardless of whether or not there is punctuation following the word at the end of the line. (Hint
Step13: Okay, now a slightly trickier one. In the cell below, I've created a string all_lines which evaluates to the entire text of the poem in one string. Execute this cell.
Step14: Now, write an expression that evaluates to all of the words in the poem that follow the word 'I'. (The strings in the resulting list should not include the I.) Hint
Step15: Finally, something super tricky. Here's a list of strings that contains a restaurant menu. Your job is to wrangle this plain text, slightly-structured data into a list of dictionaries.
Step16: You'll need to pull out the name of the dish and the price of the dish. The v after the hyphen indicates that the dish is vegetarian---you'll need to include that information in your dictionary as well. I've included the basic framework; you just need to fill in the contents of the for loop.
|
7,619
|
<ASSISTANT_TASK:>
Python Code:
import datetime
import numpy as np
import scipy as sp
from scipy import interpolate
import matplotlib.pyplot as plt
%matplotlib inline
import cmocean
import seawater as sw
from netCDF4 import Dataset
from llctools import llc_model
from pyspec import spectrum as spec
c1 = 'slateblue'
c2 = 'tomato'
c3 = 'k'
c4 = 'indigo'
plt.rcParams['lines.linewidth'] = 1.5
ap = .75
plt.style.use('seaborn-colorblind')
def leg_width(lg,fs):
    """Set the linewidth of each legend handle in legend `lg` to `fs`."""
    for legobj in lg.legendHandles:
        legobj.set_linewidth(fs)
def parse_time(times):
    """Converts an array of strings that defines
    the LLC outputs into datetime arrays,
    e.g., '20110306T010000' --> datetime.datetime(2011, 3, 6, 1, 0)

    Input
    ------
    times: array of strings that define LLC model time

    Output
    ------
    time: array of datetime associated with times
    """
    time = []
    for i in range(times.size):
        # Slice the fixed-width 'YYYYMMDDTHHMMSS' stamp (minutes/seconds ignored).
        yr = times[i][:4]
        mo = times[i][4:6]
        day = times[i][6:8]
        hr = times[i][9:11]
        time.append(datetime.datetime(int(yr),int(mo),int(day),int(hr)))
    return np.array(time)
grid_path = '../data/llc/2160/grid/'
data_path = '../data/llc/2160/uv/'
# Kuroshio Extension model class
m = llc_model.LLCRegion(grid_dir=grid_path,data_dir=data_path,Nlon=480,Nlat=466,Nz=1)
m.load_grid()
# model sub-region surface fields files
fileu = m.data_dir+'U_480x466x1.20110308T220000'
filev = m.data_dir+'V_480x466x1.20110308T220000'
fileeta = m.data_dir[:-3]+'Eta/Eta_480x466x1.20110308T220000'
time_string = fileu[-15:]
time=llc_model.parse_time(time_string)
time
# important note: U,V are relative to the LLC model grid,
# not geostrophical coordinates. Thus, on
# faces 4 and 5, U = meridional component
# and V = -zonal component (see Dimitris's llc.readme).
u, v, eta = m.load_2d_data(filev), -m.load_2d_data(fileu), m.load_2d_data(fileeta)
lon,lat = m.lon[m.Nlat//2],m.lat[:,m.Nlon//2]
# create a regular Cartesian grid
dd = 6. # grid spacing [km]
dlon = dd/111.320*np.cos(np.abs(m.lat[m.Nlat//2,m.Nlon//2])*np.pi/180.)
dlat = dd/110.574
lonimin,lonimax = lon.min()+dlon,lon.max()-dlon
latimin,latimax = lat.min()+dlat,lat.max()-dlat
loni = np.arange(m.lon.min(),m.lon.max()+dlon,dlon)
lati = np.arange(m.lat.min(),m.lat.max()+dlat,dlat)
long,latg = np.meshgrid(loni,lati)
f0 = sw.f(latg)
interpu, interpv, interpeta = sp.interpolate.interp2d(lon,lat,u), sp.interpolate.interp2d(lon,lat,v), sp.interpolate.interp2d(lon,lat,eta)
ui, vi,etai = interpu(loni,lati), interpv(loni,lati), interpeta(loni,lati)
def calc_gradu(u,v,dd = 6.):
    """Finite-difference relative vorticity, divergence and strain rate.

    u, v : 2D velocity components on a regular grid with spacing `dd`
    (same spacing in both directions). Returns (vorticity, divergence, strain).
    """
    du_dy, du_dx = np.gradient(u, dd, dd)
    dv_dy, dv_dx = np.gradient(v, dd, dd)
    vorticity = dv_dx - du_dy
    divergence = du_dx + dv_dy
    strain_rate = ((du_dx - dv_dy) ** 2 + (dv_dx + du_dy) ** 2) ** .5
    return vorticity, divergence, strain_rate
# double mirror ui and vi
def double_mirror(a, forward=True):
    """Mirror-extend a 2D field (or undo the extension).

    forward=True: return the even extension of `a`, mirrored in x then in y,
    giving a (2*ny, 2*nx) array that is continuous at the seams (used to make
    fields periodic before FFT-based derivatives).
    forward=False: return the top-left quarter, undoing the extension.
    """
    if forward:
        A = np.hstack([a, np.fliplr(a)])
        # Bug fix: the original applied np.fliplr here a second time, which is a
        # no-op on the already left/right-symmetric A and merely duplicated the
        # rows; the vertical mirror requires np.flipud. Also, the default for
        # `forward` was the string 'True' (always truthy) rather than a bool.
        A = np.vstack([A, np.flipud(A)])
    else:
        iy, ix = a.shape
        A = a[:iy//2, :ix//2]
    return A
def calc_gradu2(u,v,dd = 6.):
    """Spectral (FFT-based) vorticity, divergence and strain rate.

    u, v : 2D velocity components on a regular grid with spacing `dd` (same in
    x and y). The fields are first mirror-extended with `double_mirror` so the
    FFT sees a periodic signal; callers crop the result back with
    double_mirror(..., forward=False). Returns (vorticity, divergence, strain)
    on the extended (2*ny, 2*nx) grid.
    """
    # Even extension to enforce periodicity before the FFT.
    u, v = double_mirror(u), double_mirror(v)
    iy,ix = u.shape
    # Domain lengths of the extended grid.
    # NOTE(review): (ix-1)*dd treats ix samples as spanning (ix-1)*dd, whereas
    # the FFT period of ix samples is usually taken as ix*dd -- confirm the
    # intended convention.
    Lx, Ly = (ix-1)*dd, (iy-1)*dd
    dk = 1./Lx
    dl = 1./Ly
    # Angular wavenumbers: l in FFT order (non-negative then negative
    # frequencies); k non-negative only, since rfft2 keeps half the spectrum
    # along the last axis.
    l = 2*np.pi*dl*np.append( np.arange(0.,iy//2), np.arange(-iy//2,0.) )
    k = 2*np.pi*dk*np.arange(0.,ix//2+1)
    k,l = np.meshgrid(k,l)
    uh, vh = np.fft.rfft2(u), np.fft.rfft2(v)
    # Spectral derivatives: d/dx <-> i*k, d/dy <-> i*l.
    ux, uy = np.fft.irfft2(1j*k*uh), np.fft.irfft2(1j*l*uh)
    vx, vy = np.fft.irfft2(1j*k*vh), np.fft.irfft2(1j*l*vh)
    vort, div, strain = (vx - uy), ux+vy, ( (ux-vy)**2 + (vx+uy)**2 )**.5
    return vort, div, strain
def rms(field):
    """Root-mean-square of `field` (any array-like exposing .mean())."""
    mean_square = (field ** 2).mean()
    return mean_square ** .5
vort, div, strain = calc_gradu(ui,vi,dd = 6.e3)
vort, div, strain = vort/f0, div/f0, strain/f0
vort2, div2, strain2 = calc_gradu2(ui,vi,dd = 6.e3)
vort2,div2, strain2 = double_mirror(vort2,forward=False),double_mirror(div2,forward=False), double_mirror(strain2,forward=False)
vort2, div2, strain2 = vort2/f0, div2/f0, strain2/f0
vort.mean()/np.abs(vort).max(), div.mean()/np.abs(div).max(), strain.mean()/np.abs(strain).max()
vort2.mean()/np.abs(vort2).max(), div2.mean()/np.abs(div2).max(), strain2.mean()/np.abs(strain2).max()
fig = plt.figure(figsize=(14,4))
cv = np.linspace(-1.5,1.5,20)
cd = np.linspace(-.5,.5,20)
cs = np.linspace(0.,1.5,10)
ax = fig.add_subplot(131)
plt.contourf(vort,cv,vmin=cv.min(),vmax=cv.max(),cmap=cmocean.cm.balance,extend='both')
plt.title('vorticity, rms = %f' % rms(vort))
#plt.colorbar()
plt.xticks([]); plt.yticks([])
ax = fig.add_subplot(132)
plt.contourf(vort2,cv,vmin=cv.min(),vmax=cv.max(),cmap=cmocean.cm.balance,extend='both')
plt.title('vorticity, rms = %f' % rms(vort2))
#plt.colorbar()
plt.xticks([]); plt.yticks([])
fig = plt.figure(figsize=(14,4))
ax = fig.add_subplot(131)
plt.contourf(div,cd,vmin=cd.min(),vmax=cd.max(),cmap=cmocean.cm.balance,extend='both')
plt.title('divergence, rms = %f' % rms(div))
#plt.colorbar()
plt.xticks([]); plt.yticks([])
ax = fig.add_subplot(132)
plt.contourf(div2,cd,vmin=cd.min(),vmax=cd.max(),cmap=cmocean.cm.balance,extend='both')
plt.title('divergence, rms = %f' % rms(div2))
#plt.colorbar()
plt.xticks([]); plt.yticks([])
fig = plt.figure(figsize=(14,4))
ax = fig.add_subplot(131)
plt.contourf(strain,cs,vmin=cs.min(),vmax=cs.max(),cmap=cmocean.cm.amp,extend='both')
plt.title('divergence, rms = %f' % rms(strain))
#plt.colorbar()
plt.xticks([]); plt.yticks([])
ax = fig.add_subplot(132)
plt.contourf(strain2,cs,vmin=cs.min(),vmax=cs.max(),cmap=cmocean.cm.amp,extend='both')
plt.title('strain, rms = %f' % rms(strain2))
#plt.colorbar()
plt.xticks([]); plt.yticks([])
stats_4320 = np.load(__depends__[1])
stats_2160 = np.load(__depends__[2])
llc = Dataset(__depends__[0])
time2160 = parse_time(llc['2160']['hourly']['time'][:])
timed2160 = time2160[::24]
time4320 = parse_time(llc['4320']['hourly']['time'][:])
timed4320 = time4320[::24]
cv = np.linspace(-1.5,1.5,20)
cd = np.linspace(-.5,.5,20)
cs = np.linspace(0.,1.5,10)
fig = plt.figure(figsize=(19,4))
ax = fig.add_subplot(131)
plt.contourf(vort,cv,vmin=cv.min(),vmax=cv.max(),cmap='RdBu_r',extend='both')
plt.title('vorticity, rms = %f' % rms(vort))
plt.colorbar()
plt.xticks([]); plt.yticks([])
ax = fig.add_subplot(132)
plt.title('divergence, rms = %f' % rms(div))
plt.contourf(div,cd,vmin=cd.min(),vmax=cd.max(),cmap='RdBu_r',extend='both')
plt.colorbar()
plt.xticks([]); plt.yticks([])
ax = fig.add_subplot(133)
plt.title('strain rate, rms %f' % rms(strain))
plt.contourf(strain,cs,vmax=cs.max(),cmap='viridis',extend='max')
plt.colorbar()
plt.xticks([]); plt.yticks([])
specU = spec.TWODimensional_spec(ui.copy(),d1=dd,d2=dd)
specV = spec.TWODimensional_spec(vi.copy(),d1=dd,d2=dd)
specEta = spec.TWODimensional_spec(etai.copy(),d1=dd,d2=dd)
iEu,iEv, iEeta = specU.ispec,specV.ispec, specEta.ispec
iE = 0.5*(iEu+iEv)
kr = np.array([1.e-4,1.])
e2 = kr**-2/1.e4
e3 = kr**-3/1.e7
e5 = kr**-5/1.e9
fig = plt.figure(figsize=(12,4))
ax = fig.add_subplot(121)
plt.loglog(specU.ki,iE)
plt.loglog(kr,12.*e2,'.5',linewidth=2); plt.text(1/17.5,5.e-1,'-2',fontsize=14)
plt.loglog(kr,35*e3,'.5',linewidth=2); plt.text(1/30.,2.e-2,'-3',fontsize=14)
plt.xlim(1.e-3,1.e-1)
plt.ylim(1.e-2,1.e2)
plt.xlabel('Wavenumber [cpkm]')
plt.ylabel(r'KE density [m$^2$ s$^{-2}$/cpkm]')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.45, hspace=None)
ax = fig.add_subplot(122)
plt.loglog(specEta.ki,iEeta)
plt.loglog(kr,e2/.5e1,'.5',linewidth=2); plt.text(1/17.5,1.e-2,'-2',fontsize=14)
plt.loglog(kr,3*e5/1.5e2,'.5',linewidth=2); plt.text(1/25.5,1.e-5,'-5',fontsize=14)
plt.xlim(1.e-3,1.e-1)
plt.ylim(1.e-6,1.e2)
plt.ylabel(r'SSH variance density [m$^2$/cpkm]')
plt.xlabel('Wavenumber [cpkm]')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: This notebook showcases the analysis applied to LLC outputs. Here the calculations are performed for a single snapshot. The full LLC model outputs can be obtained from the ECCO Project. All fields used in this paper take about 700 GB!
Step3: Vorticity, divergence, and rate of strain
Step4: Discretization error
Step5: Quick-and-dirty, sanity-check plots
Step6: Spectra
|
7,620
|
<ASSISTANT_TASK:>
Python Code:
# Some examples (you do not have to remember this now):
# Python's four built-in container types, holding the same mixed contents.
a_list = [1,2,3, "let's", "use", "containers"]
a_tuple = (1, 2, 3, "let's", "use", "containers")
a_set = {1, 2, 3, "let's", "use", "containers"}
a_dict = {1:"let's", 2:"use", 3: "containers"}
#print(a_list)
#print(a_tuple)
#print(a_set)
#print(a_dict)
# A handful of common str methods; none of them mutate a_string.
a_string = 'hello world'
print('example 1. upper method:', a_string.upper())
print('example 2. count method:', a_string.count('l'))
print('example 3. replace method:', a_string.replace('l', 'b'))
print('example 4. split method:', a_string.split())
print('example 5. split method:', a_string.split(sep='o'))
# Built-in help shows the signature: replace(old, new, count=-1, /)
help(str.replace)
# Default count=-1 replaces every occurrence of 'r'.
a_string = 'rats are the best.'
result = a_string.replace('r', 'c')
print(result)
# Passing count positionally limits the replacement to the first 'r'.
a_string = 'rats are the best.'
result = a_string.replace('r', 'c', 1)
print(result)
# split has two keyword parameters: sep and maxsplit.
help(str.split)
a_string = 'USA Today has come out with a new survey: Apparently three out of four people make up 75 percent of the population.'
words = a_string.split()
print(words)
# maxsplit=2 (here positional): only the first two spaces split the string.
a_string = 'USA Today has come out with a new survey: Apparently three out of four people make up 75 percent of the population.'
words = a_string.split(' ', 2)
print(words)
# Same call, but with both arguments passed by keyword.
a_string = 'USA Today has come out with a new survey: Apparently three out of four people make up 75 percent of the population.'
words = a_string.split(sep=' ', maxsplit=2)
print(words)
# Intentional error demo: str.replace's count is positional-only,
# so passing it by keyword raises TypeError.
a_string = 'rats are the best.'
result = a_string.replace('r', 'c', count=1)
# The positional form works again.
a_string = 'rats are the best.'
result = a_string.replace('r', 'c', 1)
print(result)
# Same intentional TypeError demo, repeated for emphasis.
a_string = 'rats are the best.'
result = a_string.replace('r', 'c', count=1)
# More str methods: lower, strip (whitespace or given chars), partition.
print(a_string.lower())
print(a_string.strip())
print(a_string.strip('an'))
print(a_string.partition('and'))
# your examples here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Understanding class methods
Step2: In all of the examples above, a string method is called, e.g., upper or count.
Step3: The method documentation contains three parts
Step4: It worked! We've called the string method by only providing a value for the positional parameters. However, what if we are not happy with the provided default value, can we override it?
Step5: Yes! We've provided a value for count, e.g., 1, and now 'r' is only replaced once with 'c'. Luckily, the 'r' in 'are' has not been replaced.
Step6: Let's go through the parameters of the string method split
Step7: And that is correct! Of course, we can specify a value for the keyword parameters. We provide the a space ' ' for sep and 2 for maxsplit.
Step8: Please note that we have splitted the string on a space ' ' two times.
Step9: For the answer, we need to go back to the function parameters
Step10: This does not.
Step11: Summary
Step12: Exercise 2
|
7,621
|
<ASSISTANT_TASK:>
Python Code:
import openpnm as op
# Suppress log output below level 50 (errors only).
wrk = op.Workspace()
wrk.logelevel=50
# A 10x10x10 cubic network with unit spacing (default 6-connectivity).
pn = op.network.Cubic(shape=[10, 10, 10], spacing=1)
# Same lattice, but each pore connects to all 26 neighbours.
pn = op.network.Cubic(shape=[10, 10, 10], spacing=1, connectivity=26)
import scipy as sp
# Per-axis spacing given explicitly; connectivity 26 again.
pn = op.network.Cubic(shape=[10, 10, 10], spacing=[1, 1, 1], connectivity=26)
print(pn.num_throats())
# Randomly pick 500 throat indices (may contain duplicates) to remove.
# NOTE(review): sp.random is deprecated/removed in modern SciPy; numpy.random
# is the usual choice -- confirm the SciPy version this targets.
throats_to_trim = sp.random.randint(low=0, high=pn.Nt-1, size=500)
from openpnm import topotools as tt
tt.trim(network=pn, throats=throats_to_trim)
# randint returns some duplicate numbers so actual number of trimmed throats varies
assert pn.num_throats() < 10476
# Anisotropic spacing produces a network squished in the x-direction.
pn = op.network.Cubic(shape=[20, 20, 10], spacing=[0.003, 0.02, 0.01])
import scipy.ndimage as spim
# Build a spherical template from a distance transform around the centre voxel.
# NOTE(review): sp.ones is also a deprecated SciPy alias for numpy.ones.
im = sp.ones([21, 21, 21])
im[10, 10, 10] = 0
dt = spim.distance_transform_bf(input=im)
sphere = dt < 10
pn = op.network.CubicTemplate(template=sphere, spacing=0.1)
# Export the network for visualisation (e.g. in Paraview).
op.io.VTK.save(network=pn, filename='my_network')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this case pn will be a 10 x 10 x 10 cube with each pore spaced 1 unit away from its neighbors in all directions. Each pore is connected to the 6 neighbors adjacent to each face of the cubic lattice site in which it sits. The image below illustrates the resulting network with pores shown as white spheres, along with a zoomed-in view of the internals, showing the connectivity of the pores.
Step2: This yields the following network, which clearly has a LOT of connections!
Step3: The following image shows histogram of the pore connectivity before and after trimming. Before trimming the coordination numbers fall into 4 distinct bins depending on where the pores lies (internal, face, edge or corner), while after trimming the coordination numbers show some distribution around their original values. If the trimming is too aggressive, OpenPNM might report an error message saying that isolated pores exist, which means that some regions of the network are now disconnected from the main network due to a lack of connected throats.
Step4: This results in the following network with is squished in the x-direction.
Step5: This results in the following
|
7,622
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import seaborn as sns; sns.set_context('notebook')
from pymc3 import exp, Normal, Binomial, sample, Model
# Samples for each dose level
n = 5 * np.ones(4, dtype=int)
# Log-dose
dose = np.array([-.86, -.3, -.05, .73])
# Observed number of deaths at each dose level.
deaths = np.array([0, 1, 3, 5])
def invlogit(x):
    # Inverse-logit (logistic) transform; `exp` here is pymc3's tensor exp.
    return exp(x) / (1 + exp(x))
with Model() as bioassay_model:
    # Logit-linear model parameters
    # NOTE(review): the third positional argument to Normal is the standard
    # deviation in PyMC3, so 0.01 is a very tight prior; if a vague
    # precision-0.01 prior (PyMC2 style) was intended, use tau=0.01 -- confirm.
    alpha = Normal('alpha', 0, 0.01)
    beta = Normal('beta', 0, 0.01)
    # Calculate probabilities of death
    theta = invlogit(alpha + beta * dose)
    # Data likelihood (rebinds the name `deaths` to the observed RV)
    deaths = Binomial('deaths', n=n, p=theta, observed=deaths)
from pymc3 import Metropolis
with bioassay_model:
step = Metropolis(scaling=0.0001)
bioassay_trace = sample(1000, step=step)
from pymc3 import traceplot
traceplot(bioassay_trace[500:], varnames=['alpha'])
with bioassay_model:
bioassay_trace = sample(10000)
traceplot(bioassay_trace[9000:], varnames=['beta'])
import matplotlib.pyplot as plt
beta_trace = bioassay_trace['beta']
fig, axes = plt.subplots(2, 5, figsize=(14,6))
axes = axes.ravel()
for i in range(10):
axes[i].hist(beta_trace[500*i:500*(i+1)])
plt.tight_layout()
with bioassay_model:
bioassay_trace = sample(1000, njobs=2, start=[{'alpha':0.5}, {'alpha':5}])
bioassay_trace.get_values('alpha', chains=0)[0]
plt.plot(bioassay_trace.get_values('alpha', chains=0)[:200], 'r--')
plt.plot(bioassay_trace.get_values('alpha', chains=1)[:200], 'k--')
from pymc3 import geweke
with bioassay_model:
tr = sample(2000)
z = geweke(tr, intervals=15)
plt.scatter(*z['alpha'].T)
plt.hlines([-1,1], 0, 1000, linestyles='dotted')
plt.xlim(0, 1000)
from pymc3 import gelman_rubin
gelman_rubin(bioassay_trace)
from pymc3 import forestplot
forestplot(bioassay_trace)
from pymc3 import autocorrplot
autocorrplot(tr);
bioassay_trace['alpha'].shape
from pymc3 import effective_n
effective_n(bioassay_trace)
from pymc3 import Normal, Binomial, Deterministic, invlogit
# Samples for each dose level
n = 5 * np.ones(4, dtype=int)
# Log-dose
dose = np.array([-.86, -.3, -.05, .73])
with Model() as model:
# Logit-linear model parameters
alpha = Normal('alpha', 0, 0.01)
beta = Normal('beta', 0, 0.01)
# Calculate probabilities of death
theta = Deterministic('theta', invlogit(alpha + beta * dose))
# Data likelihood
deaths = Binomial('deaths', n=n, p=theta, observed=[0, 1, 3, 5])
with model:
deaths_sim = Binomial('deaths_sim', n=n, p=theta, shape=4)
with model:
gof_trace = sample(2000)
from pymc3 import forestplot
forestplot(gof_trace, varnames=['deaths_sim'])
r_t_obs = [3, 7, 5, 102, 28, 4, 98, 60, 25, 138, 64, 45, 9, 57, 25, 33, 28, 8, 6, 32, 27, 22]
n_t_obs = [38, 114, 69, 1533, 355, 59, 945, 632, 278,1916, 873, 263, 291, 858, 154, 207, 251, 151, 174, 209, 391, 680]
r_c_obs = [3, 14, 11, 127, 27, 6, 152, 48, 37, 188, 52, 47, 16, 45, 31, 38, 12, 6, 3, 40, 43, 39]
n_c_obs = [39, 116, 93, 1520, 365, 52, 939, 471, 282, 1921, 583, 266, 293, 883, 147, 213, 122, 154, 134, 218, 364, 674]
N = len(n_c_obs)
# Write your answer here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Informal Methods
Step2: A similar approach involves
Step3: An extension of this approach can be taken
Step4: A principal reason that evidence from informal techniques cannot
Step5: The arguments expected are the following
Step6: For the best results, each chain should be initialized to highly
Step7: Autocorrelation
Step8: Goodness of Fit
Step9: The posterior predictive distribution of deaths uses the same functional
Step10: Notice that the observed stochastic Binomial has been replaced with a stochastic node that is identical in every respect to deaths, except that its values are not fixed to be the observed data -- they are left to vary according to the values of the fitted parameters.
Step11: Exercise
|
7,623
|
<ASSISTANT_TASK:>
Python Code:
import io
from omegaconf import OmegaConf
import pp
from pp.config import connections
c0 = pp.c.mzi()
pp.qp(c0)
netlist = c0.get_netlist()
import pp
c1 = pp.component_from_yaml('mzi.yml')
pp.show(c1)
pp.plotgds(c1)
len(c1.get_netlist().connections)
connections.clear() # reset the connections global variable
import pp
c2 = pp.component_from_yaml('mzi2.yml')
pp.show(c2)
pp.plotgds(c2)
len(c2.get_netlist().connections)
connections.clear() # reset the connections global variable
import pp
c3 = pp.component_from_yaml('mzi3.yml')
pp.show(c3)
pp.plotgds(c3)
len(c3.get_netlist().connections)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This is what we did to store the netlist
Step2: You can also reconstruct the circuit from the instances and placements but the you lose the most important part of the netlist, which is the connections
Step3: A better way to reconstruct the circuit and maintain connectivity is from the instances and connections
|
7,624
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
def square(n):
    """Return a list of the squares of 0..n-1, built with an explicit loop."""
    result = []
    for value in range(n):
        result.append(value * value)
    return result
print(square(10))
sq = lambda n: [i**2 for i in range(n)]
print(sq(10)) #actually this isn't really functional! printing is a "side effect"
class Observation():  # "object" and "class" are interchangeable!
    """A single observation holding a sequence of numeric data points."""

    def __init__(self, data):  # method
        # data: sequence of numbers; stored as-is (not copied)
        self.data = data  # attribute

    def average(self):  # method
        """Return the arithmetic mean of the data.

        Raises ZeroDivisionError if the data sequence is empty.  (The
        original loop-based version crashed with an UnboundLocalError on
        empty data because its loop index was never assigned.)
        """
        return sum(self.data) / len(self.data)
obs1 = Observation([0,1,2])
obs2 = Observation([4,5,6])
print("Avg 1 = {:e}; Avg 2 = {:e}".format(obs1.average(), obs2.average()))
print("Type of Avg 1 = {:}; Type of Avg 2 = {:}".format(type(obs1), type(obs2)))
print(obs1.data)
print(obs2.data)
class TimeSeries(Observation):  # inherits all methods/attributes from Observation
    """An Observation whose data points are paired with time stamps."""

    def __init__(self, time, data):
        self.time = time
        super().__init__(data)  # run the base-class constructor
        if len(self.time) != len(self.data):
            raise ValueError("Time and data must have same length!")

    def stop_time(self):
        """Return the last time stamp of the series."""
        return self.time[-1]
tobs = TimeSeries([0,1,2],[3,4,5])
print(tobs)
print("Stop time = {:e}".format(tobs.stop_time())) # new method
print("tobs average = {:e}".format(tobs.average())) # but tobs also has methods from Observation
print(print) #functions are objects!
dont_do_this = print # this is the object representing a function!
dont_do_this("dont do this!")
x = np.linspace(0,2*np.pi, 1000)
y_theory = np.sinc(x)
y = y_theory + np.random.rand(1000)
fig = plt.figure(figsize=(8,8)) # create a figure object
ax_data = fig.add_axes([0.1,0.4,0.8,0.8]) # figure objects have an add_axes method
ax_residual = fig.add_axes([0.1,0.1,0.8,0.3])
# this is one axis
ax_data.plot(x,y, label='sinc(x)') # a plot is a **method** of a set of axes!
ax_data.legend() # so is a legend
ax_data.set_ylabel('f(x)') # the labels are attributes, set_ylabel sets it
# this is another...just refer to them by name!
ax_residual.plot(x, y-y_theory, label='residual')
ax_residual.legend()
ax_residual.set_xlabel('x')
ax_residual.set_ylabel('residual')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Programming "Paradigms"
Step2: Functional
Step3: Objects
Step4: Instances
Step5: Inheritance
Step6: Objects in practice
Step7: Example
|
7,625
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# define colours used in plots
dark_purple = '#8464c5'
light_purple = '#ededfb'
dark_green = '#336433'
light_green = '#a0d9a0'
white = '#FFFFFF'
olive = '#aaa460'
def get_data(t):
    """Load the hysteresis data for a specified thickness, t, from the
    relevant file and calculate the values of dm_z/dH.

    Creates arrays for the values of H (*Ms).
    Returns mz, dm_z/dH (scaled values) and the values for H:
    (mz, dmdH_scaled, H_all)
    """
    # load hysteresis data for specified thickness
    mx, my, mz, energy = np.load('../data/figure_1/hysteresis_loops/'
                                 'sim_hysteresis_FeGe_nanodisk_d150_h{}.npy'.format(int(t)))
    # create arrays for the Zeeman field (400 increasing + 401 decreasing steps)
    H_up = np.linspace(-4, 4, 400, endpoint=False)
    H_down = np.linspace(4, -4, 401)
    H_all = np.concatenate((H_up, H_down), axis=0)
    # calculate dm/dH from the data by finite differences.
    # dm/dH is scaled to a maximum value of 1.
    dm = mz[1:-1] - mz[0:-2]
    dH = H_all[1:-1] - H_all[0:-2]
    dmdH = dm/dH
    dmdH_scaled = dmdH/max(dmdH)
    return mz, dmdH_scaled, H_all
def base_plot(mz, dmdH_scaled, H_all):
    """Plot the mz vs. H hysteresis curves.

    Requires the values of mz, dmdH_scaled and H_all, the array of field
    steps on the hysteresis loop (as returned by ``get_data``).
    Returns (plt, ax) so callers can shade regions and add labels on top.

    Note: the original body ignored its arguments and re-fetched the data
    via ``get_data(t)`` using the global ``t``; the passed arguments are
    now used directly.
    """
    # create the figure and define an axis parameter.
    fig = plt.figure(figsize=(9, 5))
    ax = fig.add_subplot(111)
    # plot mz vs. H values (increasing branch solid, decreasing branch dashed).
    ax.plot(H_all[0:400], mz[0:400], 'k', linewidth=2.5, label="Increasing H")
    ax.plot(H_all[400:801], mz[400:801], '--', color='k', linewidth=2.5, label="Decreasing H")
    # plot the dm_z/dH vs. H values
    ax.plot(H_all[0:400], dmdH_scaled[0:400], 'b--', markevery=3, linewidth=1.5, label=r'dm$_z$/dH')
    # add axis labels
    plt.ylabel(r'm$_{\mathrm{z}}$', fontsize=20)
    plt.xlabel(r'H ($\times$M$_{\mathrm{s}}$)', fontsize=20)
    plt.xticks([-4, -3 ,-2, -1, 0, 1, 2, 3, 4], fontsize=18)
    plt.yticks([-1, -0.5, 0, 0.5, 1], fontsize=18)
    plt.xlim([-3, 3])
    # add the legend
    plt.legend(loc='lower right', fontsize=16)
    plt.tight_layout()
    return plt, ax
# define the thickness
t = 20
# get data
mz, dmdH_scaled, H_all = get_data(t)
plt, ax = base_plot(mz, dmdH_scaled, H_all)
trans = [219, 319]
print 'These transition values correspond to values of H={219}*Ms and H={319}*Ms\n'\
'where Ms=384 kA/m'.format(*H_all)
# add letter labels, which refer to 3D magnetisation plots
ax.text(-1.5, 0.2, '(a)', fontsize=20)
ax.text(1.35, 0.2, '(f)', fontsize=20)
ax.text(2.55, 0.2, '(g)', fontsize=20)
# Colour the different regions in the hysteresis plot
ax.axvspan(H_all[0], H_all[trans[0]], color=dark_purple)
ax.axvspan(H_all[trans[0]], H_all[trans[1]], color=light_purple)
ax.axvspan(H_all[trans[1]], 3, color=dark_green)
plt.savefig('pdfs/figure-1i-20nm.pdf')
plt.show()
# 35 nm thick disk
t = 35
mz, dmdH_scaled, H_all = get_data(t)
plt, ax = base_plot(mz, dmdH_scaled, H_all)
# indices (into H_all) of the three transitions seen in dm_z/dH
trans = [207, 220, 310]
# Python 3 print() call (the original was a Python 2 print statement).
print('These transition values correspond to values of H={207}*Ms, H={220}*Ms and H={310}*Ms\n'
      'where Ms=384 kA/m'.format(*H_all))
# add letter labels, which refer to 3D magnetisation plots
ax.text(-1.55, 0.3, '(a)', fontsize=20)
ax.text(0.1, -0.3, '(h)', fontsize=20)
ax.text(1.15, -0.3, '(f)', fontsize=20)
ax.text(2.45, -0.3, '(g)', fontsize=20)
# Colour the different regions in the hysteresis plot
ax.axvspan(H_all[0], H_all[trans[0]], color=dark_purple)
ax.axvspan(H_all[trans[0]], H_all[trans[1]], color=olive)
ax.axvspan(H_all[trans[1]], H_all[trans[2]], color=light_purple)
ax.axvspan(H_all[trans[2]], 3, color=dark_green)
plt.savefig('pdfs/figure-1i-35nm.pdf')
plt.show()
# 55 nm thick disk
t = 55
mz, dmdH_scaled, H_all = get_data(t)
plt, ax = base_plot(mz, dmdH_scaled, H_all)
# indices (into H_all) of the six transitions seen in dm_z/dH
trans = [153, 176, 210, 214, 225, 304]
# Python 3 print() call (the original was a Python 2 print statement).
print('These transition values correspond to values of H={153}*Ms, H={176}*Ms, '
      'H={210}*Ms, H={214}*Ms, H={225}*Ms and H={304}*Ms\n'
      'where Ms=384 kA/m'.format(*H_all))
# add letter labels, which refer to 3D magnetisation plots
ax.text(-2.05, 0.5, '(a)', fontsize=20)
ax.text(-0.85, 0.5, '(b)', fontsize=20)
ax.text(-0.25, 0.5, '(c)', fontsize=20)
ax.text(1.2, 0.5, '(f)', fontsize=20)
ax.text(2.4, 0.5, '(g)', fontsize=20)
# narrow regions (d) and (e) get arrows instead of inline labels
ax.annotate('(d)', xy=(0.25, -0.35), xytext=(-0.25, -0.5),
            arrowprops=dict(facecolor='black', shrink=0.05, width=0.5, headwidth=6, frac=0.3),
            fontsize=20)
ax.annotate('(e)', xy=(0.4, -0.35), xytext=(0.6, -0.5),
            arrowprops=dict(facecolor='black', shrink=0.05, width=0.5, headwidth=6, frac=0.3),
            fontsize=20)
# Colour the different regions in the hysteresis plot
ax.axvspan(H_all[0], H_all[trans[0]], color=dark_purple)
ax.axvspan(H_all[trans[0]], H_all[trans[1]], color=olive)
ax.axvspan(H_all[trans[1]], H_all[trans[2]], color=light_green)
ax.axvspan(H_all[trans[2]], H_all[trans[3]], color=olive)
ax.axvspan(H_all[trans[3]], H_all[trans[4]], color=white)
ax.axvspan(H_all[trans[4]], H_all[trans[5]], color=light_purple)
ax.axvspan(H_all[trans[5]], 3, color=dark_green)
plt.savefig('pdfs/figure-1i-55nm.pdf')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: We start by defining a few helper variables and functions which be used for creating the plots below.
Step4: The plots are produced below.
|
7,626
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install -q -U "tensorflow-text==2.8.*"
!pip install -q tf-models-official==2.7.0
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from official import nlp
from official.nlp.modeling.ops import sampling_module
from official.nlp.modeling.ops import beam_search
params = {}
params['num_heads'] = 2
params['num_layers'] = 2
params['batch_size'] = 2
params['n_dims'] = 256
params['max_decode_length'] = 4
cache = {
'layer_%d' % layer: {
'k': tf.zeros([params['batch_size'], params['max_decode_length'], params['num_heads'], int(params['n_dims']/params['num_heads'])], dtype=tf.float32),
'v': tf.zeros([params['batch_size'], params['max_decode_length'], params['num_heads'], int(params['n_dims']/params['num_heads'])], dtype=tf.float32)
} for layer in range(params['num_layers'])
}
print("cache key shape for layer 1 :", cache['layer_1']['k'].shape)
def length_norm(length, dtype):
    """Return length normalization factor.

    NOTE(review): the exponent is 0.0, so this always evaluates to 1.0,
    i.e. no length penalty -- presumably deliberate for the demo, but
    confirm if a nonzero alpha was intended.
    (The original had its triple quotes stripped, leaving the docstring
    as a bare line and making the function a SyntaxError.)
    """
    return tf.pow(((5. + tf.cast(length, dtype)) / 6.), 0.0)
probabilities = tf.constant([[[0.3, 0.4, 0.3], [0.3, 0.3, 0.4],
[0.1, 0.1, 0.8], [0.1, 0.1, 0.8]],
[[0.2, 0.5, 0.3], [0.2, 0.7, 0.1],
[0.1, 0.1, 0.8], [0.1, 0.1, 0.8]]])
def model_fn(i):
return probabilities[:, i, :]
def _symbols_to_logits_fn():
    """Calculates logits of the next tokens.

    Returns a closure with the (ids, i, cache) signature expected by the
    sampling/beam-search modules.  (The original docstring had its triple
    quotes stripped, which made the function a SyntaxError.)
    """
    def symbols_to_logits_fn(ids, i, temp_cache):
        # ids is unused: this toy model's logits depend only on step i.
        del ids
        logits = tf.cast(tf.math.log(model_fn(i)), tf.float32)
        return logits, temp_cache
    return symbols_to_logits_fn
greedy_obj = sampling_module.SamplingModule(
length_normalization_fn=None,
dtype=tf.float32,
symbols_to_logits_fn=_symbols_to_logits_fn(),
vocab_size=3,
max_decode_length=params['max_decode_length'],
eos_id=10,
padded_decode=False)
ids, _ = greedy_obj.generate(
initial_ids=tf.constant([9, 1]), initial_cache=cache)
print("Greedy Decoded Ids:", ids)
top_k_obj = sampling_module.SamplingModule(
length_normalization_fn=length_norm,
dtype=tf.float32,
symbols_to_logits_fn=_symbols_to_logits_fn(),
vocab_size=3,
max_decode_length=params['max_decode_length'],
eos_id=10,
sample_temperature=tf.constant(1.0),
top_k=tf.constant(3),
padded_decode=False,
enable_greedy=False)
ids, _ = top_k_obj.generate(
initial_ids=tf.constant([9, 1]), initial_cache=cache)
print("top-k sampled Ids:", ids)
top_p_obj = sampling_module.SamplingModule(
length_normalization_fn=length_norm,
dtype=tf.float32,
symbols_to_logits_fn=_symbols_to_logits_fn(),
vocab_size=3,
max_decode_length=params['max_decode_length'],
eos_id=10,
sample_temperature=tf.constant(1.0),
top_p=tf.constant(0.9),
padded_decode=False,
enable_greedy=False)
ids, _ = top_p_obj.generate(
initial_ids=tf.constant([9, 1]), initial_cache=cache)
print("top-p sampled Ids:", ids)
beam_size = 2
params['batch_size'] = 1
beam_cache = {
'layer_%d' % layer: {
'k': tf.zeros([params['batch_size'], params['max_decode_length'], params['num_heads'], params['n_dims']], dtype=tf.float32),
'v': tf.zeros([params['batch_size'], params['max_decode_length'], params['num_heads'], params['n_dims']], dtype=tf.float32)
} for layer in range(params['num_layers'])
}
print("cache key shape for layer 1 :", beam_cache['layer_1']['k'].shape)
ids, _ = beam_search.sequence_beam_search(
symbols_to_logits_fn=_symbols_to_logits_fn(),
initial_ids=tf.constant([9], tf.int32),
initial_cache=beam_cache,
vocab_size=3,
beam_size=beam_size,
alpha=0.6,
max_decode_length=params['max_decode_length'],
eos_id=10,
padded_decode=False,
dtype=tf.float32)
print("Beam search ids:", ids)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Decoding API
Step2: Initialize Sampling Module in TF-NLP.
Step3: In auto-regressive architectures like Transformer based Encoder-Decoder models,
Step5: Define closure for length normalization if needed.
Step6: Create model_fn
Step8: Initialize symbols_to_logits_fn
Step9: Greedy
Step10: top_k sampling
Step11: top_p sampling
Step12: Beam search decoding
|
7,627
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
from ipywidgets import interact
import ipywidgets as widgets
from common import common_vcu_demo_streamin_decode_display
import os
from ipywidgets import HBox, VBox, Text, Layout
codec_type=widgets.RadioButtons(
options=['avc', 'hevc'],
description='Codec Type:',
disabled=False)
video_sink={'kmssink':['DP', 'HDMI'], 'fakevideosink':['none']}
def print_video_sink(VideoSink):
pass
def select_video_sink(VideoCodec):
display_type.options = video_sink[VideoCodec]
sink_name = widgets.RadioButtons(options=sorted(video_sink.keys(), key=lambda k: len(video_sink[k]), reverse=True), description='Video Sink:')
init = sink_name.value
display_type = widgets.RadioButtons(options=video_sink[init], description='Display:')
j = widgets.interactive(print_video_sink, VideoSink=display_type)
i = widgets.interactive(select_video_sink, VideoCodec=sink_name)
HBox([codec_type, i, j])
audio_sink={'none':['none'], 'aac':['auto','alsasink','pulsesink'],'vorbis':['auto','alsasink','pulsesink']}
audio_src={'none':['none'], 'aac':['auto','alsasrc','pulsesrc'],'vorbis':['auto','alsasrc','pulsesrc']}
#val=sorted(audio_sink, key = lambda k: (-len(audio_sink[k]), k))
def print_audio_sink(AudioSink):
pass
def print_audio_src(AudioSrc):
pass
def select_audio_sink(AudioCodec):
audio_sinkW.options = audio_sink[AudioCodec]
audio_srcW.options = audio_src[AudioCodec]
audio_codecW = widgets.RadioButtons(options=sorted(audio_sink.keys(), key=lambda k: len(audio_sink[k])), description='Audio Codec:')
init = audio_codecW.value
audio_sinkW = widgets.RadioButtons(options=audio_sink[init], description='Audio Sink:')
audio_srcW = widgets.RadioButtons(options=audio_src[init], description='Audio Src:')
j = widgets.interactive(print_audio_sink, AudioSink=audio_sinkW)
i = widgets.interactive(select_audio_sink, AudioCodec=audio_codecW)
HBox([i, j])
kernel_recv_buffer_size=widgets.Text(value='',
placeholder='(optional) 16000000',
description='Kernel Recv Buf Size:',
style={'description_width': 'initial'},
#layout=Layout(width='33%', height='30px'),
disabled=False)
port_number=widgets.Text(value='',
placeholder='(optional) 50000, 42000',
description=r'Port No:',
#style={'description_width': 'initial'},
#
disabled=False)
#kernel_recv_buffer_size
HBox([kernel_recv_buffer_size, port_number])
entropy_buffers=widgets.Dropdown(
options=['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15'],
value='5',
description='Entropy Buffers Nos:',
style={'description_width': 'initial'},
disabled=False,)
show_fps=widgets.Checkbox(
value=False,
description='show-fps',
#style={'description_width': 'initial'},
disabled=False)
HBox([entropy_buffers, show_fps])
from IPython.display import clear_output
from IPython.display import Javascript
def run_all(ev):
display(Javascript('IPython.notebook.execute_cells_below()'))
def clear_op(event):
clear_output(wait=True)
return
button1 = widgets.Button(
description='Clear Output',
style= {'button_color':'lightgreen'},
#style= {'button_color':'lightgreen', 'description_width': 'initial'},
layout={'width': '300px'}
)
button2 = widgets.Button(
description='',
style= {'button_color':'white'},
#style= {'button_color':'lightgreen', 'description_width': 'initial'},
layout={'width': '38px'},
disabled=True
)
button1.on_click(run_all)
button1.on_click(clear_op)
def start_demo(event):
    # Button callback: build the demo's command line from the current widget
    # values and launch the VCU stream-in/decode/display shell script.
    # Runs in a Jupyter kernel only: the "!sh" line is an IPython shell magic.
    #clear_output(wait=True)
    arg = common_vcu_demo_streamin_decode_display.cmd_line_args_generator(port_number.value, codec_type.value, audio_codecW.value, display_type.value, kernel_recv_buffer_size.value, sink_name.value, entropy_buffers.value, show_fps.value, audio_sinkW.value);
    #sh vcu-demo-streamin-decode-display.sh $arg > logs.txt 2>&1
    !sh vcu-demo-streamin-decode-display.sh $arg
    return
button = widgets.Button(
description='click to start vcu-stream_in-decode-display demo',
style= {'button_color':'lightgreen'},
#style= {'button_color':'lightgreen', 'description_width': 'initial'},
layout={'width': '350px'}
)
button.on_click(start_demo)
HBox([button, button2, button1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run the Demo
Step2: Video
Step3: Audio
Step4: Advanced options
|
7,628
|
<ASSISTANT_TASK:>
Python Code:
from problem_formulation import joint
joint
import chaospy
polynomial_expansion = chaospy.generate_expansion(3, joint)
polynomial_expansion[:4].round(10)
alpha, beta = chaospy.variable(2)
phi_phi = chaospy.outer(
polynomial_expansion, polynomial_expansion)
[polynomial_expansion.shape, phi_phi.shape]
e_beta_phi_phi = chaospy.E(beta*phi_phi, joint)
e_beta_phi_phi[:3, :3].round(6)
_, norms = chaospy.generate_expansion(3, joint, retall=True)
norms[:4].round(6)
import numpy
def right_hand_side(c, t):
    """ODE right-hand side for the Galerkin coefficients c at time t.

    t is unused but required by the odeint call signature.  Uses the
    module-level e_beta_phi_phi and norms arrays.
    """
    weighted = c * e_beta_phi_phi
    return -weighted.sum(axis=-1) / norms
e_alpha_phi = chaospy.E(alpha*polynomial_expansion, joint)
initial_condition = e_alpha_phi/norms
from scipy.integrate import odeint
coordinates = numpy.linspace(0, 10, 1000)
coefficients = odeint(func=right_hand_side,
y0=initial_condition, t=coordinates)
coefficients.shape
u_approx = chaospy.sum(polynomial_expansion*coefficients, -1)
u_approx[:4].round(2)
mean = chaospy.E(u_approx, joint)
variance = chaospy.Var(u_approx, joint)
mean[:5].round(6), variance[:5].round(6)
from matplotlib import pyplot
pyplot.rc("figure", figsize=[6, 4])
pyplot.xlabel("coordinates")
pyplot.ylabel("model approximation")
pyplot.axis([0, 10, 0, 2])
sigma = numpy.sqrt(variance)
pyplot.fill_between(coordinates, mean-sigma, mean+sigma, alpha=0.3)
pyplot.plot(coordinates, mean)
pyplot.show()
from problem_formulation import error_in_mean, error_in_variance
error_in_mean(mean).round(16), error_in_variance(variance).round(12)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here the parameters are positional defined as $\alpha$ and $\beta$
Step2: Note again, that the variables are here defined positional. $\alpha$ and
Step3: As above, these two variables are defined positional to correspond to both
Step4: This allows us to calculate the full expression
Step5: For the numerator $\mbox E(\Phi_k\Phi_k)$, it is worth noting that these are
Step6: Having all terms in place, we can create a function for the right-hand-side
Step7: Initial conditions
Step8: Equation solving
Step9: These coefficients can then be used to construct the approximation for $u$
Step10: Finally, this can be used to calculate statistical properties
Step11: Using the true mean and variance as reference, we can also calculate the mean
|
7,629
|
<ASSISTANT_TASK:>
Python Code:
import gzip
import pickle
import numpy as np
import matplotlib.pyplot as plt
import random
def vectorized_result(d):
    """Turn a digit label d (0..9) into a one-hot (10, 1) float32 column vector."""
    one_hot = np.zeros(shape=(10, 1), dtype=np.float32)
    one_hot[d, 0] = 1.0
    return one_hot
vectorized_result(2)
def load_data():
    """Read MNIST from ../mnist.pkl.gz and return (training_data, test_data).

    training_data: list of (784x1 image column, 10x1 one-hot label) pairs.
    test_data:     list of (784x1 image column, integer label) pairs.
    The validation split from the pickle is read but not returned.
    """
    with gzip.open('../mnist.pkl.gz', 'rb') as f:
        train, validate, test = pickle.load(f, encoding="latin1")
    print(f'shape of training data: {(train[0].shape, train[1].shape)}')
    training_data = [(np.reshape(image, (784, 1)), vectorized_result(label))
                     for image, label in zip(train[0], train[1])]
    test_data = [(np.reshape(image, (784, 1)), label)
                 for image, label in zip(test[0], test[1])]
    return training_data, test_data
list(zip([1, 2], ['a', 'b']))
%%time
training_data, test_data = load_data()
def show_digits(rows, columns, offset=0):
    """Display a rows x columns grid of MNIST digits starting at index offset.

    Reads the module-level ``training_data`` list and also writes the
    figure to "digits.pdf".
    """
    fig, axes = plt.subplots(rows, columns)
    for row in range(rows):
        for col in range(columns):
            index = row * columns + col + offset
            # invert so digits render dark-on-light
            pixels = np.reshape(1 - training_data[index][0], (28, 28))
            axes[row, col].imshow(pixels, cmap="gray")
            axes[row, col].axis('off')
    plt.savefig("digits.pdf")
    plt.show()
show_digits(5, 15, 0)
!explorer digits.pdf || open digits.pdf
def rndMatrix(rows, cols):
    """Return a (rows, cols) matrix of Gaussian entries scaled by 1/sqrt(cols)."""
    scale = np.sqrt(cols)
    return np.random.randn(rows, cols) / scale
rndMatrix(2, 2)
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + exp(-x)) applied elementwise.

    The naive form overflows in np.exp(-x) for large-magnitude negative x
    (RuntimeWarning); here each sign is handled with an exp of a
    non-positive argument only, giving identical values without overflow.
    Accepts an array-like; returns an ndarray of the same shape.
    """
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
    ex = np.exp(x[~pos])
    out[~pos] = ex / (1.0 + ex)
    return out
sigmoid(np.array([-1, 0, 1]))
def sigmoid_prime(x):
    """Derivative of the logistic function: sigmoid(x) * (1 - sigmoid(x))."""
    value = sigmoid(x)
    return value * (1 - value)
sigmoid_prime(np.array([-5, 0, 5]))
class Network(object):
    """A 784-hiddenSize-10 feed-forward network for MNIST.

    Only the parameters are created here; ``feedforward``, ``sgd``,
    ``update_mini_batch``, ``backprop`` and ``evaluate`` are defined as
    functions in later cells and attached to the class.
    """
    def __init__(self, hiddenSize):
        # Fixed MNIST geometry: one input per pixel, one output per digit.
        self.mInputSize = 28 * 28
        self.mHiddenSize = hiddenSize
        self.mOutputSize = 10
        # Biases start at zero; weights are drawn via rndMatrix (order of the
        # two draws matters for reproducibility under a fixed numpy seed).
        self.mBiasesH = np.zeros((self.mHiddenSize, 1)) # biases hidden layer
        self.mBiasesO = np.zeros((self.mOutputSize, 1)) # biases output layer
        self.mWeightsH = rndMatrix(self.mHiddenSize, self.mInputSize) # weights hidden layer
        self.mWeightsO = rndMatrix(self.mOutputSize, self.mHiddenSize) # weights output layer
def feedforward(self, x):
    """Propagate the input column vector x (784, 1) through the network.

    Returns the (10, 1) output-layer activations.
    """
    hidden = sigmoid(self.mWeightsH @ x + self.mBiasesH)
    return sigmoid(self.mWeightsO @ hidden + self.mBiasesO)
Network.feedforward = feedforward
def sgd(self, training_data, epochs, mbs, eta, test_data):
    """Train with mini-batch stochastic gradient descent.

    training_data is shuffled in place at the start of each epoch; after
    every epoch the accuracy on test_data is printed.  mbs is the
    mini-batch size and eta the learning rate.
    """
    total = len(training_data)
    n_test = len(test_data)
    for epoch in range(epochs):
        random.shuffle(training_data)
        for start in range(0, total, mbs):
            self.update_mini_batch(training_data[start:start + mbs], eta)
        print('Epoch %2d: %d / %d' % (epoch, self.evaluate(test_data), n_test))
Network.sgd = sgd
def update_mini_batch(self, mini_batch, eta):
    """Apply one gradient-descent step using the examples in mini_batch.

    Sums the per-example gradients from backprop and moves every bias and
    weight against the averaged gradient with learning rate eta.
    """
    grad_BH = np.zeros((self.mHiddenSize, 1))
    grad_BO = np.zeros((self.mOutputSize, 1))
    grad_WH = np.zeros((self.mHiddenSize, self.mInputSize))
    grad_WO = np.zeros((self.mOutputSize, self.mHiddenSize))
    for x, y in mini_batch:
        d_BH, d_BO, d_WH, d_WO = self.backprop(x, y)
        grad_BH += d_BH
        grad_BO += d_BO
        grad_WH += d_WH
        grad_WO += d_WO
    step = eta / len(mini_batch)   # average the gradient over the batch
    self.mBiasesH -= step * grad_BH
    self.mBiasesO -= step * grad_BO
    self.mWeightsH -= step * grad_WH
    self.mWeightsO -= step * grad_WO
Network.update_mini_batch = update_mini_batch
def backprop(self, x, y):
    """Return the gradient of the cost for a single training example (x, y).

    x is a (784, 1) input column vector, y a (10, 1) target vector.
    Returns (nabla_BH, nabla_BO, nabla_WH, nabla_WO): the gradients with
    respect to the hidden/output biases and hidden/output weights,
    computed with the backpropagation algorithm.
    """
    # feedforward pass
    ZH = self.mWeightsH @ x + self.mBiasesH    # weighted input, hidden layer
    AH = sigmoid(ZH)                           # activation, hidden layer
    ZO = self.mWeightsO @ AH + self.mBiasesO   # weighted input, output layer
    AO = sigmoid(ZO)                           # activation, output layer
    # backwards pass, output layer
    epsilonO = (AO - y) * sigmoid_prime(ZO)    # output-layer error term
    nabla_BO = epsilonO
    nabla_WO = epsilonO @ AH.transpose()
    # backwards pass, hidden layer: propagate the error through W_O
    epsilonH = (self.mWeightsO.transpose() @ epsilonO) * sigmoid_prime(ZH)
    nabla_BH = epsilonH
    nabla_WH = epsilonH @ x.transpose()
    return nabla_BH, nabla_BO, nabla_WH, nabla_WO
Network.backprop = backprop
def evaluate(self, test_data):
    """Count how many examples in test_data the network classifies correctly.

    The predicted digit is the index of the strongest output activation.
    """
    predictions = ((np.argmax(self.feedforward(x)), y) for x, y in test_data)
    return sum(predicted == actual for predicted, actual in predictions)
Network.evaluate = evaluate
%%time
np.random.seed(1)
net = Network(50)
net.sgd(training_data, 30, 10, 0.25, test_data)
40 * (1 + 784) + 10 * (1 + 40)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As our data is stored as a tuple of numpy arrays, we have to import numpy.
Step2: In order to be able to show the images of the handwritten digits, we use matplotlib.
Step3: We need to import the module random, as we are using the function random.shuffle to reorder the training data.
Step4: The function $\texttt{vectorized_result}(d)$ converts a digit $d \in {0,\cdots,9}$ into a numpy array $\mathbf{y}$ of shape $(10, 1)$ such that we have
Step5: The file mnist.pkl.gz contains a triple of the form
Step6: We store the data in two variables
Step7: The function $\texttt{show_digit}(\texttt{row}, \texttt{columns}, \texttt{offset})$
Step8: Our goal is to find the <em style="color
Step9: The function $\texttt{sigmoid}(x)$ computes the sigmoid of $x$, which is defined as
Step10: The function $\texttt{sigmoid_prime}(x)$ computes the derivative of the sigmoid function for $x$. The implementation is based on the equation
Step11: The class Network is used to represent a feed-forward neural network with one hidden layer.
Step12: Given a neural network $n$ and an input vector $x$ for this neural network, the function $n.\texttt{feedforward}(x)$ compute the output of the neural network.
Step13: Given a neural network $n$, the method sgd(training_data, epochs, mbs, eta, test_data) uses stochastic gradient descent to train the network. The parameters are as follows
Step14: The method update_mini_batch performs one step of gradient descent for the data from one
Step15: Given a neural network $n$, the method $n.\texttt{backprop}(x, y)$ takes a training example $(x,y)$ and calculates the gradient of the cost function with respect to this training example. This is done by implementing the
Step16: Given a neural network n, the method n.evaluate(test_data) uses the test data to compute the number of examples that are predicted correctly by the neural network n.
Step17: What is the number of parameters of our network?
|
7,630
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
%matplotlib inline
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
import shogun as sg
import numpy as np
# use scipy for generating samples
from scipy.stats import laplace, norm
def sample_gaussian_vs_laplace(n=220, mu=0.0, sigma2=1, b=np.sqrt(0.5)):
    """Draw n samples each from N(mu, sigma2) and Laplace(mu, b).

    Returns the pair (gaussian_samples, laplace_samples).
    """
    gaussian = mu + np.sqrt(sigma2) * norm.rvs(size=n)
    laplacian = laplace.rvs(size=n, loc=mu, scale=b)
    return gaussian, laplacian
mu=0.0
sigma2=1
b=np.sqrt(0.5)
n=220
X,Y=sample_gaussian_vs_laplace(n, mu, sigma2, b)
# plot both densities and histograms
plt.figure(figsize=(18,5))
plt.suptitle("Gaussian vs. Laplace")
plt.subplot(121)
Xs=np.linspace(-2, 2, 500)
plt.plot(Xs, norm.pdf(Xs, loc=mu, scale=sigma2))
plt.plot(Xs, laplace.pdf(Xs, loc=mu, scale=b))
plt.title("Densities")
plt.xlabel("$x$")
plt.ylabel("$p(x)$")
plt.subplot(122)
plt.hist(X, alpha=0.5)
plt.xlim([-5,5])
plt.ylim([0,100])
plt.hist(Y,alpha=0.5)
plt.xlim([-5,5])
plt.ylim([0,100])
plt.legend(["Gaussian", "Laplace"])
plt.title('Samples');
print("Gaussian vs. Laplace")
print("Sample means: %.2f vs %.2f" % (np.mean(X), np.mean(Y)))
print("Samples variances: %.2f vs %.2f" % (np.var(X), np.var(Y)))
# turn data into Shogun representation (columns vectors)
feat_p=sg.features(X.reshape(1,len(X)))
feat_q=sg.features(Y.reshape(1,len(Y)))
# choose kernel for testing. Here: Gaussian
kernel_width=1
kernel=sg.kernel("GaussianKernel", log_width=np.log(kernel_width))
# create mmd instance of test-statistic
mmd=sg.QuadraticTimeMMD()
mmd.set_kernel(kernel)
mmd.set_p(feat_p)
mmd.set_q(feat_q)
# compute biased and unbiased test statistic (default is unbiased)
mmd.set_statistic_type(sg.ST_BIASED_FULL)
biased_statistic=mmd.compute_statistic()
mmd.set_statistic_type(sg.ST_UNBIASED_FULL)
statistic=unbiased_statistic=mmd.compute_statistic()
print("%d x MMD_b[X,Y]^2=%.2f" % (len(X), biased_statistic))
print("%d x MMD_u[X,Y]^2=%.2f" % (len(X), unbiased_statistic))
mmd.set_null_approximation_method(sg.NAM_PERMUTATION)
mmd.set_num_null_samples(200)
# now show a couple of ways to compute the test
# compute p-value for computed test statistic
p_value=mmd.compute_p_value(statistic)
print("P-value of MMD value %.2f is %.2f" % (statistic, p_value))
# compute threshold for rejecting H_0 for a given test power
alpha=0.05
threshold=mmd.compute_threshold(alpha)
print("Threshold for rejecting H0 with a test power of %.2f is %.2f" % (alpha, threshold))
# performing the test by hand given the above results, note that those two are equivalent
if statistic>threshold:
print("H0 is rejected with confidence %.2f" % alpha)
if p_value<alpha:
print("H0 is rejected with confidence %.2f" % alpha)
# or, compute the full two-sample test directly
# fixed test power, binary decision
binary_test_result=mmd.perform_test(alpha)
if binary_test_result:
print("H0 is rejected with confidence %.2f" % alpha)
num_samples=500
# sample null distribution
null_samples=mmd.sample_null()
# sample alternative distribution, generate new data for that
alt_samples=np.zeros(num_samples)
for i in range(num_samples):
X=norm.rvs(size=n, loc=mu, scale=sigma2)
Y=laplace.rvs(size=n, loc=mu, scale=b)
feat_p=sg.features(np.reshape(X, (1,len(X))))
feat_q=sg.features(np.reshape(Y, (1,len(Y))))
# TODO: reset pre-computed kernel here
mmd.set_p(feat_p)
mmd.set_q(feat_q)
alt_samples[i]=mmd.compute_statistic()
np.std(alt_samples)
def plot_alt_vs_null(alt_samples, null_samples, alpha):
    """Plot histograms of the null and alternative MMD distributions.

    Draws three panels: the null distribution, the alternative distribution,
    and both overlaid, with the (1 - alpha) quantile of the null marked as a
    red vertical line (the test's rejection threshold at level alpha).
    """
    plt.figure(figsize=(18, 5))
    plt.subplot(131)
    plt.hist(null_samples, 50, color='blue')
    plt.title('Null distribution')
    plt.subplot(132)
    plt.title('Alternative distribution')
    plt.hist(alt_samples, 50, color='green')
    plt.subplot(133)
    plt.hist(null_samples, 50, color='blue')
    plt.hist(alt_samples, 50, color='green', alpha=0.5)
    plt.title('Null and alternative distribution')  # fixed typo "distriution"
    # find the (1-alpha) element of the null distribution
    null_samples_sorted = np.sort(null_samples)
    quantile_idx = int(len(null_samples) * (1 - alpha))
    quantile = null_samples_sorted[quantile_idx]
    plt.axvline(x=quantile, ymin=0, ymax=100, color='red',
                label=str(int(round((1 - alpha) * 100))) + '% quantile of null')
    # Use plt.legend() explicitly: the bare legend() relied on the %pylab
    # namespace and fails under a plain "import matplotlib.pyplot as plt".
    plt.legend()
plot_alt_vs_null(alt_samples, null_samples, alpha)
# optional: plot spectrum of joint kernel matrix
# TODO: it would be good if there was a way to extract the joint kernel matrix for all kernel tests
# get joint feature object and compute kernel matrix and its spectrum
feats_p_q=mmd.get_p_and_q()
mmd.get_kernel().init(feats_p_q, feats_p_q)
K=mmd.get_kernel().get_kernel_matrix()
w,_=np.linalg.eig(K)
# visualise K and its spectrum (only up to threshold)
plt.figure(figsize=(18,5))
plt.subplot(121)
plt.imshow(K, interpolation="nearest")
plt.title("Kernel matrix K of joint data $X$ and $Y$")
plt.subplot(122)
thresh=0.1
plt.plot(w[:len(w[w>thresh])])
title("Eigenspectrum of K until component %d" % len(w[w>thresh]));
# threshold for eigenspectrum
thresh=0.1
# compute number of eigenvalues to use
num_eigen=len(w[w>thresh])
# finally, do the test, use biased statistic
mmd.set_statistic_type(sg.ST_BIASED_FULL)
#tell Shogun to use spectrum approximation
mmd.set_null_approximation_method(sg.NAM_MMD2_SPECTRUM)
mmd.spectrum_set_num_eigenvalues(num_eigen)
mmd.set_num_null_samples(num_samples)
# the usual test interface
statistic=mmd.compute_statistic()
p_value_spectrum=mmd.compute_p_value(statistic)
print("Spectrum: P-value of MMD test is %.2f" % p_value_spectrum)
# compare with ground truth from permutation test
mmd.set_null_approximation_method(sg.NAM_PERMUTATION)
mmd.set_num_null_samples(num_samples)
p_value_permutation=mmd.compute_p_value(statistic)
print("Bootstrapping: P-value of MMD test is %.2f" % p_value_permutation)
# tell Shogun to use gamma approximation
mmd.set_null_approximation_method(sg.NAM_MMD2_GAMMA)
# the usual test interface
statistic=mmd.compute_statistic()
p_value_gamma=mmd.compute_p_value(statistic)
print("Gamma: P-value of MMD test is %.2f" % p_value_gamma)
# compare with ground truth bootstrapping
mmd.set_null_approximation_method(sg.NAM_PERMUTATION)
p_value_spectrum=mmd.compute_p_value(statistic)
print("Bootstrapping: P-value of MMD test is %.2f" % p_value_spectrum)
# type I error is false alarm, therefore sample data under H0
num_trials=50
rejections_gamma=zeros(num_trials)
rejections_spectrum=zeros(num_trials)
rejections_bootstrap=zeros(num_trials)
num_samples=50
alpha=0.05
for i in range(num_trials):
X=norm.rvs(size=n, loc=mu, scale=sigma2)
Y=laplace.rvs(size=n, loc=mu, scale=b)
# simulate H0 via merging samples before computing the
Z=hstack((X,Y))
X=Z[:len(X)]
Y=Z[len(X):]
feat_p=sg.features(reshape(X, (1,len(X))))
feat_q=sg.features(reshape(Y, (1,len(Y))))
# gamma
mmd=sg.QuadraticTimeMMD()
mmd.set_p(feat_p)
mmd.set_q(feat_q)
mmd.set_kernel(kernel)
mmd.set_null_approximation_method(sg.NAM_MMD2_GAMMA)
mmd.set_statistic_type(sg.ST_BIASED_FULL)
rejections_gamma[i]=mmd.perform_test(alpha)
# spectrum
mmd=sg.QuadraticTimeMMD()
mmd.set_p(feat_p)
mmd.set_q(feat_q)
mmd.set_kernel(kernel)
mmd.set_null_approximation_method(sg.NAM_MMD2_SPECTRUM)
mmd.spectrum_set_num_eigenvalues(num_eigen)
mmd.set_num_null_samples(num_samples)
mmd.set_statistic_type(sg.ST_BIASED_FULL)
rejections_spectrum[i]=mmd.perform_test(alpha)
# bootstrap (precompute kernel)
mmd=sg.QuadraticTimeMMD()
mmd.set_p(feat_p)
mmd.set_q(feat_q)
p_and_q=mmd.get_p_and_q()
kernel.init(p_and_q, p_and_q)
precomputed_kernel=sg.CustomKernel(kernel)
mmd.set_kernel(precomputed_kernel)
mmd.set_null_approximation_method(sg.NAM_PERMUTATION)
mmd.set_num_null_samples(num_samples)
mmd.set_statistic_type(sg.ST_BIASED_FULL)
rejections_bootstrap[i]=mmd.perform_test(alpha)
convergence_gamma=cumsum(rejections_gamma)/(arange(num_trials)+1)
convergence_spectrum=cumsum(rejections_spectrum)/(arange(num_trials)+1)
convergence_bootstrap=cumsum(rejections_bootstrap)/(arange(num_trials)+1)
print("Average rejection rate of H0 for Gamma is %.2f" % mean(convergence_gamma))
print("Average rejection rate of H0 for Spectrum is %.2f" % mean(convergence_spectrum))
print("Average rejection rate of H0 for Bootstrapping is %.2f" % mean(rejections_bootstrap))
# paramters of dataset
m=20000
distance=10
stretch=5
num_blobs=3
angle=pi/4
# these are streaming features
gen_p=sg.GaussianBlobsDataGenerator(num_blobs, distance, 1, 0)
gen_q=sg.GaussianBlobsDataGenerator(num_blobs, distance, stretch, angle)
# stream some data and plot
num_plot=1000
features=gen_p.get_streamed_features(num_plot)
features=features.create_merged_copy(gen_q.get_streamed_features(num_plot))
data=features.get_feature_matrix()
figure(figsize=(18,5))
subplot(121)
grid(True)
plot(data[0][0:num_plot], data[1][0:num_plot], 'r.', label='$x$')
title('$X\sim p$')
subplot(122)
grid(True)
plot(data[0][num_plot+1:2*num_plot], data[1][num_plot+1:2*num_plot], 'b.', label='$x$', alpha=0.5)
_=title('$Y\sim q$')
block_size=100
# if features are already under the streaming interface, just pass them
mmd=sg.LinearTimeMMD()
mmd.set_p(gen_p)
mmd.set_q(gen_q)
mmd.set_kernel(kernel)
mmd.set_num_samples_p(m)
mmd.set_num_samples_q(m)
mmd.set_num_blocks_per_burst(block_size)
# compute an unbiased estimate in linear time
statistic=mmd.compute_statistic()
print("MMD_l[X,Y]^2=%.2f" % statistic)
# note: due to the streaming nature, successive calls of compute statistic use different data
# and produce different results. Data cannot be stored in memory
for _ in range(5):
print("MMD_l[X,Y]^2=%.2f" % mmd.compute_statistic())
# data source
gen_p=sg.GaussianBlobsDataGenerator(num_blobs, distance, 1, 0)
gen_q=sg.GaussianBlobsDataGenerator(num_blobs, distance, stretch, angle)
num_samples=100
print("Number of data is %d" % num_samples)
# retreive some points, store them as non-streaming data in memory
data_p=gen_p.get_streamed_features(num_samples)
data_q=gen_q.get_streamed_features(num_samples)
# example to create mmd (note that num_samples can be maximum the number of data in memory)
mmd=sg.LinearTimeMMD()
mmd.set_p(data_p)
mmd.set_q(data_q)
mmd.set_kernel(sg.GaussianKernel(10, 1))
mmd.set_num_blocks_per_burst(100)
print("Linear time MMD statistic: %.2f" % mmd.compute_statistic())
mmd=sg.LinearTimeMMD()
mmd.set_p(gen_p)
mmd.set_q(gen_q)
mmd.set_kernel(kernel)
mmd.set_num_samples_p(m)
mmd.set_num_samples_q(m)
mmd.set_num_blocks_per_burst(block_size)
print("m=%d samples from p and q" % m)
print("Binary test result is: " + ("Rejection" if mmd.perform_test(alpha) else "No rejection"))
print("P-value test result is %.2f" % mmd.compute_p_value(mmd.compute_statistic()))
# mmd instance using streaming features
mmd=sg.LinearTimeMMD()
mmd.set_p(gen_p)
mmd.set_q(gen_q)
mmd.set_num_samples_p(m)
mmd.set_num_samples_q(m)
mmd.set_num_blocks_per_burst(block_size)
sigmas=[2**x for x in np.linspace(-5, 5, 11)]
print("Choosing kernel width from", ["{0:.2f}".format(sigma) for sigma in sigmas])
for i in range(len(sigmas)):
mmd.add_kernel(sg.GaussianKernel(10, sigmas[i]))
# optmal kernel choice is possible for linear time MMD
mmd.set_kernel_selection_strategy(sg.KSM_MAXIMIZE_POWER)
# must be set true for kernel selection
mmd.set_train_test_mode(True)
# select best kernel
mmd.select_kernel()
best_kernel=mmd.get_kernel()
best_kernel=sg.GaussianKernel.obtain_from_generic(best_kernel)
print("Best single kernel has bandwidth %.2f" % best_kernel.get_width())
mmd.set_null_approximation_method(sg.NAM_MMD1_GAUSSIAN);
p_value_best=mmd.compute_p_value(mmd.compute_statistic());
print("Bootstrapping: P-value of MMD test with optimal kernel is %.2f" % p_value_best)
m=5000
mmd.set_num_samples_p(m)
mmd.set_num_samples_q(m)
mmd.set_train_test_mode(False)
num_samples=500
# sample null and alternative distribution, implicitly generate new data for that
mmd.set_null_approximation_method(sg.NAM_PERMUTATION)
mmd.set_num_null_samples(num_samples)
null_samples=mmd.sample_null()
alt_samples=zeros(num_samples)
for i in range(num_samples):
alt_samples[i]=mmd.compute_statistic()
plot_alt_vs_null(alt_samples, null_samples, alpha)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some Formal Basics (skip if you just want code examples)
Step2: Now how to compare these two sets of samples? Clearly, a t-test would be a bad idea since it basically compares mean and variance of $X$ and $Y$. But we set that to be equal. By chance, the estimates of these statistics might differ, but that is unlikely to be significant. Thus, we have to look at higher order statistics of the samples. In fact, kernel two-sample tests look at all (infinitely many) higher order moments.
Step3: Quadratic Time MMD
Step4: Any sub-class of <a href="http
Step5: Now let us visualise distribution of MMD statistic under $H_0
Step6: Null and Alternative Distribution Illustrated
Step7: Different Ways to Approximate the Null Distribution for the Quadratic Time MMD
Step8: The above plot of the Eigenspectrum shows that the Eigenvalues are decaying extremely fast. We choose the number for the approximation such that all Eigenvalues bigger than some threshold are used. In this case, we will not lose a lot of accuracy while gaining a significant speedup. For slower decaying Eigenspectrums, this approximation might be more expensive.
Step9: The Gamma Moment Matching Approximation and Type I errors
Step10: As we can see, the above example was kind of unfortunate, as the approximation fails badly. We check the type I error to verify that. This works similar to sampling the alternative distribution
Step11: We see that Gamma basically never rejects, which is inline with the fact that the p-value was massively overestimated above. Note that for the other tests, the p-value is also not at its desired value, but this is due to the low number of samples/repetitions in the above code. Increasing them leads to consistent type I errors.
Step12: We now describe the linear time MMD, as described in [1, Section 6], which is implemented in Shogun. A fast, unbiased estimate for the original MMD expression which still uses all available data can be obtained by dividing data into two parts and then compute
Step13: Sometimes, one might want to use <a href="http
Step14: The Gaussian Approximation to the Null Distribution
Step15: Kernel Selection for the MMD -- Overview
Step16: Now perform two-sample test with that kernel
Step17: For the linear time MMD, the null and alternative distributions look different than for the quadratic time MMD as plotted above. Let's sample them (takes longer, reduce number of samples a bit). Note how we can tell the linear time MMD to simulate the null hypothesis, which is necessary since we cannot permute by hand as samples are not in memory.
Step18: And visualise again. Note that both null and alternative distribution are Gaussian, which allows the fast null distribution approximation and the optimal kernel selection
|
7,631
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy.signal import medfilt
import matplotlib.pyplot as plt
import kplr
%matplotlib inline
client = kplr.API()
koi = client.koi(1274.01)
lcs = koi.get_light_curves(short_cadence=True)
p = 704.2
time, flux, ferr, med = [], [], [], []
for lc in lcs:
with lc.open() as f:
# The lightcurve data are in the first FITS HDU.
hdu_data = f[1].data
time.append(hdu_data["time"][~np.isnan(hdu_data["pdcsap_flux"])])
flux.append(hdu_data["pdcsap_flux"][~np.isnan(hdu_data["pdcsap_flux"])])
ferr.append(hdu_data["pdcsap_flux_err"][~np.isnan(hdu_data["pdcsap_flux"])])
# Ignora los NaN al hacer append
normFlux, normFerr, phase = flux, ferr, time
for i in range(0,len(flux)):
med.append(np.median(flux[i]))
prom = np.mean(med)
for i in range(0,len(flux)):
normFlux[i] = normFlux[i] - (med[i] - prom)
normFlux[i] = medfilt(normFlux[i], 11)
fig, ax = plt.subplots(2,1,figsize=(15,20))
for i in range(0,len(ax)):
ax[i].set_ylim(0.996,1.0007)
ax[i].set_title('KOI-1274.01')
ax[i].set_xlabel('Phase',size=16)
ax[i].set_ylabel('Normalized flux',size=14)
for i in range(0,len(normFlux)):
normFlux[i] = normFlux[i]/prom
normFerr[i] = normFerr[i]/prom
phase[i] = time[i]/p %1
ax[0].errorbar(phase[i], normFlux[i], normFerr[i], fmt='g.', ecolor='green', ms = 3)
ax[0].plot(phase[i], normFlux[i],'k.')
ax[1].errorbar(phase[i], normFlux[i], normFerr[i], fmt='g.', ecolor='green', ms = 3)
ax[1].plot(phase[i], normFlux[i],'k--', alpha=.2)
ax[1].set_xlim(0.699,0.7005)
plt.show()
plt.close()
df = 0.003
tt = 0.7
tf = 0.4
sintf = np.sin(tf*np.pi/p)**2 # un par de variables auxiliares
sintt = np.sin(tt*np.pi/p)**2
ratio = np.sqrt(df) #Rp/R*
b = np.sqrt( ((1-ratio)**2 - (sintf)/(sintt) *(1+ratio)**2) /(1-(sintf/sintt)) )
aR = np.sqrt( ((1+ratio)**2 - b**2 *(1-sintt)) /sintt )
i = np.arccos(b/aR)
i = np.degrees(i)
rho = aR**3 * 365.25**2 / 215**3 / p**2
print 'Rp/R* \t = \t' + repr(ratio)
print 'b \t = \t' + repr(b)
print 'a/R* \t = \t' + repr(aR)
print 'i \t = \t' + repr(i)
print 'rho \t = \t' + repr(rho) + ' densidades solares'
from scipy.optimize import leastsq
from scipy.interpolate import UnivariateSpline
import scipy.integrate as integrate
w, r = np.loadtxt('kepler_response_hires1.txt', unpack=True)
w = 10*w
S = UnivariateSpline(w,r,s=0,k=1)
min_w = min(w)
max_w = max(w)
idx = np.where((w>min_w)&(w<max_w))[0]
S_wav = np.append(np.append(min_w,w[idx]),max_w)
S_res = np.append(np.append(S(min_w),r[idx]),S(max_w))
I = np.array([])
wavelengths = np.array([])
f = open('grav_4.5_lh_1.25.dat','r')
counter = 0
while(True):
l = f.readline()
if(l==''):
break
# If no jump of line or comment, save the intensities:
if(l[0]!='#' and l[:3]!='\n'):
splitted = l.split('\t')
if(len(splitted)==18):
splitted[-1] = (splitted[-1])[:-1] # The last one always has a jump of line (\n), so erase it.
wavelength = np.double(splitted[0])*10 # Convert wavelengths, which are in nanometers, to angstroms.
intensities = np.double(np.array(splitted[1:])) # Get the intensities.
ndigits = len(str(int(intensities[1])))
# Only if I(1) is different from zero, fit the LDs:
if(intensities[0]!=0.0):
intensities[1:] = intensities[1:]/1e5 # Kurucz doesn't put points on his files (e.g.: 0.8013 is 8013).
intensities[1:] = intensities[1:]*intensities[0] # All the rest of the intensities are normalized w/r to the center one.
if(counter == 0):
I = intensities
else:
I = np.vstack((I,intensities))
wavelengths = np.append(wavelengths,wavelength)
counter = counter + 1
f.close()
mu = np.array([1.0,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.25,0.2,0.15,0.125,0.1,0.075,0.05,0.025,0.01])
# Define the number of mu angles at which we will perform the integrations:
nmus = len(mu)
# Now integrate intensity through each angle:
I_l = np.array([])
for i in range(nmus):
# Interpolate the intensities:
Ifunc = UnivariateSpline(wavelengths,I[:,i],s=0,k=1)
integrand = S_res*Ifunc(S_wav)
integration_results = np.trapz(integrand, x=S_wav)
I_l = np.append(I_l,integration_results)
I0 = I_l/(I_l[0]) # Normalize profile with respect to I(mu = 1):
# Define A matrix for the linear system:
A = np.zeros([2,2])
# Define b vector for the linear system:
b = np.zeros(2)
# Obtain the alpha_n_k and beta_k that fill the A matrix and b vector:
for n in range(1,3,1):
for k in range(1,3,1):
A[n-1,k-1] = sum(((1.0-mu)**n)*((1.0-mu)**k))
b[n-1] = sum(((1.0-mu)**n)*(1.0-I0))
u = list(np.linalg.solve(A,b))
print u
import batman
params = batman.TransitParams() #object to store transit parameters
params.t0 = 0.2 #time of inferior conjunction
params.per = p #orbital period
params.rp = ratio #planet radius (in units of stellar radii)
params.a = 180 #semi-major axis (in units of stellar radii)
params.inc = i #orbital inclination (in degrees)
params.ecc = 0. #eccentricity
params.w = 90. #longitude of periastron (in degrees)
params.limb_dark = "quadratic" #limb darkening model
params.u = u #limb darkening coefficients
t = np.linspace(-720, 720, 6000) #times at which to calculate light curve
m = batman.TransitModel(params, t) #initializes model
fluxBatman = m.light_curve(params) #calculates light curve
plt.plot(t, fluxBatman)
plt.xlabel("Time from central transit")
plt.ylabel("Relative flux")
plt.show()
##############
#oc
koi = client.koi(7016.01)
lcs = koi.get_light_curves(short_cadence=True)
p = koi.koi_period
time, flux, ferr, med = [], [], [], []
for lc in lcs:
with lc.open() as f:
# The lightcurve data are in the first FITS HDU.
hdu_data = f[1].data
time.append(hdu_data["time"][~np.isnan(hdu_data["pdcsap_flux"])])
flux.append(hdu_data["pdcsap_flux"][~np.isnan(hdu_data["pdcsap_flux"])])
ferr.append(hdu_data["pdcsap_flux_err"][~np.isnan(hdu_data["pdcsap_flux"])])
# Ignora los NaN al hacer append
normFlux, normFerr, phase = flux, ferr, time
for i in range(0,len(flux)):
med.append(np.median(flux[i]))
prom = np.mean(med)
for i in range(0,len(flux)):
normFlux[i] = normFlux[i] - (med[i] - prom)
normFlux[i] = medfilt(normFlux[i], 25)
fig, ax = plt.subplots(2,1,figsize=(15,20))
for i in range(0,len(ax)):
ax[i].set_ylim(0.996,1.0007)
ax[i].set_title('KOI-7016.01')
ax[i].set_xlabel('Phase',size=16)
ax[i].set_ylabel('Normalized flux',size=14)
for i in range(0,len(normFlux)):
normFlux[i] = normFlux[i]/prom
normFerr[i] = normFerr[i]/prom
phase[i] = time[i]/p %1
ax[0].errorbar(phase[i], normFlux[i], normFerr[i], fmt='g.', ecolor='green', ms = 3)
ax[0].plot(phase[i], normFlux[i],'k.')
ax[1].errorbar(phase[i], normFlux[i], normFerr[i], fmt='g.', ecolor='green', ms = 3)
ax[1].plot(phase[i], normFlux[i],'k.', alpha=.2)
ax[1].set_xlim(0.762,0.782)
ax[1].set_ylim(0.9985,1.001)
plt.show()
plt.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <hr>
Step2: <hr>
Step3: Se ejecuta batman como se explica en la documentación, entregando como parámetros los valores obtenidos a lo largo de este trabajo.
|
7,632
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling import models, fitting
from astroquery.vizier import Vizier
import scipy.optimize
# Make plots display in notebooks
%matplotlib inline
catalog = Vizier.get_catalogs('J/A+A/605/A100')
period = np.array(catalog[0]['Period'])
log_period = np.log10(period)
k_mag = np.array(catalog[0]['__Ksmag_'])
k_mag_err = np.array(catalog[0]['e__Ksmag_'])
plt.errorbar(log_period, k_mag, k_mag_err, fmt='k.')
plt.xlabel(r'$\log_{10}$(Period [days])')
plt.ylabel('Ks')
model = models.Linear1D()
fitter = fitting.LinearLSQFitter()
best_fit = fitter(model, log_period, k_mag, weights=1.0/k_mag_err**2)
print(best_fit)
plt.errorbar(log_period,k_mag,k_mag_err,fmt='k.')
plt.plot(log_period, best_fit(log_period), color='g', linewidth=3)
plt.xlabel(r'$\log_{10}$(Period [days])')
plt.ylabel('Ks')
N = 100
x1 = np.linspace(0, 4, N) # Makes an array from 0 to 4 of N elements
y1 = x1**3 - 6*x1**2 + 12*x1 - 9
# Now we add some noise to the data
y1 += np.random.normal(0, 2, size=len(y1)) #One way to add random gaussian noise
sigma = 1.5
y1_err = np.ones(N)*sigma
plt.errorbar(x1, y1, yerr=y1_err,fmt='k.')
plt.xlabel('$x_1$')
plt.ylabel('$y_1$')
model_poly = models.Polynomial1D(degree=3)
fitter_poly = fitting.LinearLSQFitter()
best_fit_poly = fitter_poly(model_poly, x1, y1, weights = 1.0/y1_err**2)
print(best_fit_poly)
fitter_poly_2 = fitting.SimplexLSQFitter()
best_fit_poly_2 = fitter_poly_2(model_poly, x1, y1, weights = 1.0/y1_err**2)
print(best_fit_poly_2)
def calc_reduced_chi_square(fit, x, y, yerr, N, n_free):
    """Return the reduced chi-square statistic of a fit.

    fit (array): model values at the data points
    x, y, yerr (arrays): the data and its uncertainties
    N: total number of points
    n_free: number of parameters we are fitting
    """
    residuals = (fit - y) / yerr
    return np.sum(residuals ** 2) / (N - n_free)
reduced_chi_squared = calc_reduced_chi_square(best_fit_poly(x1), x1, y1, y1_err, N, 4)
print('Reduced Chi Squared with LinearLSQFitter: {}'.format(reduced_chi_squared))
reduced_chi_squared = calc_reduced_chi_square(best_fit_poly_2(x1), x1, y1, y1_err, N, 4)
print('Reduced Chi Squared with SimplexLSQFitter: {}'.format(reduced_chi_squared))
plt.errorbar(x1, y1, yerr=y1_err,fmt='k.')
plt.plot(x1, best_fit_poly(x1), color='r', linewidth=3, label='LinearLSQFitter()')
plt.plot(x1, best_fit_poly_2(x1), color='g', linewidth=3, label='SimplexLSQFitter()')
plt.xlabel(r'$\log_{10}$(Period [days])')
plt.ylabel('Ks')
plt.legend()
mu, sigma, amplitude = 0.0, 10.0, 10.0
N2 = 100
x2 = np.linspace(-30, 30, N)
y2 = amplitude * np.exp(-(x2-mu)**2 / (2*sigma**2))
y2 = np.array([y_point + np.random.normal(0, 1) for y_point in y2]) #Another way to add random gaussian noise
sigma = 1
y2_err = np.ones(N)*sigma
plt.errorbar(x2, y2, yerr=y2_err, fmt='k.')
plt.xlabel('$x_2$')
plt.ylabel('$y_2$')
model_gauss = models.Gaussian1D()
fitter_gauss = fitting.LevMarLSQFitter()
best_fit_gauss = fitter_gauss(model_gauss, x2, y2, weights=1/y2_err**2)
print(best_fit_gauss)
model_gauss.param_names
cov_diag = np.diag(fitter_gauss.fit_info['param_cov'])
print(cov_diag)
print('Amplitude: {} +\- {}'.format(best_fit_gauss.amplitude.value, np.sqrt(cov_diag[0])))
print('Mean: {} +\- {}'.format(best_fit_gauss.mean.value, np.sqrt(cov_diag[1])))
print('Standard Deviation: {} +\- {}'.format(best_fit_gauss.stddev.value, np.sqrt(cov_diag[2])))
def f(x, a, b, c):
    """Gaussian model for curve_fit: amplitude a, mean b, standard deviation c."""
    return a * np.exp(-0.5 * ((x - b) / c) ** 2)
p_opt, p_cov = scipy.optimize.curve_fit(f,x2, y2, sigma=y1_err)
a,b,c = p_opt
best_fit_gauss_2 = f(x2,a,b,c)
print(p_opt)
print('Amplitude: {} +\- {}'.format(p_opt[0], np.sqrt(p_cov[0,0])))
print('Mean: {} +\- {}'.format(p_opt[1], np.sqrt(p_cov[1,1])))
print('Standard Deviation: {} +\- {}'.format(p_opt[2], np.sqrt(p_cov[2,2])))
reduced_chi_squared = calc_reduced_chi_square(best_fit_gauss(x2), x2, y2, y2_err, N2, 3)
print('Reduced Chi Squared using astropy.modeling: {}'.format(reduced_chi_squared))
reduced_chi_squared = calc_reduced_chi_square(best_fit_gauss_2, x2, y2, y2_err, N2, 3)
print('Reduced Chi Squared using scipy: {}'.format(reduced_chi_squared))
plt.errorbar(x2, y2, yerr=y2_err, fmt='k.')
plt.plot(x2, best_fit_gauss(x2), 'g-', linewidth=6, label='astropy.modeling')
plt.plot(x2, best_fit_gauss_2, 'r-', linewidth=2, label='scipy')
plt.xlabel('$x_2$')
plt.ylabel('$y_2$')
plt.legend()
N3 = 100
x3 = np.linspace(0, 3, N3)
y3 = 5.0 * np.sin(2 * np.pi * x3)
y3 = np.array([y_point + np.random.normal(0, 1) for y_point in y3])
sigma = 1.5
y3_err = np.ones(N)*sigma
plt.errorbar(x3, y3, yerr=y3_err, fmt='k.')
plt.xlabel('$x_3$')
plt.ylabel('$y_3$')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1) Fit a Linear model
Step2: This catalog has a lot of information, but for this tutorial we are going to work only with periods and magnitudes. Let's grab them using the keywords 'Period' and __Ksmag__. Note that 'e__Ksmag_' refers to the error bars in the magnitude measurements.
Step3: Let's take a look at the magnitude measurements as a function of period
Step4: One could say that there is a linear relationship between log period and magnitudes. To probe it, we want to make a fit to the data. This is where astropy.modeling is useful. We are going to understand how in three simple lines we can make any fit we want. We are going to start with the linear fit, but first, let's understand what a model and a fitter are.
Step5: Step 2
Step6: Step 3
Step7: And that's it!
Step8: Conclusion
Step9: Let's plot it to see how it looks
Step10: To fit this data let's remember the three steps
Step11: What would happend if we use a different fitter (method)? Let's use the same model but with SimplexLSQFitter as fitter.
Step12: Note that we got a warning after using SimplexLSQFitter to fit the data. The first line says
Step13: As we can see, the Reduced Chi Square for the first fit is closer to one, which means this fit is better. Note that this is what we expected after the discussion of the warnings.
Step14: Results are as expected: the fit performed with the linear fitter is better than the second, non-linear one.
Step15: Let's do our three steps to make the fit we want. For this fit we're going to use a non-linear fitter, LevMarLSQFitter, because the model we need (Gaussian1D) is non-linear in the parameters.
Step16: We can get the covariance matrix from LevMarLSQFitter, which provides an error for our fit parameters by doing fitter.fit_info['param_cov']. The elements in the diagonal of this matrix are the square of the errors. We can check the order of the parameters using
Step17: Then
Step18: We can apply the same method with scipy.optimize.curve_fit, and compare the results using again the Reduced Chi Square Value.
Step19: Compare results
Step20: As we can see there is a very small difference in the Reduced Chi Squared. This actually needed to happen, because the fitter in astropy.modeling uses scipy to fit. The advantage of using astropy.modeling is you only need to change the name of the fitter and the model to perform a completely different fit, while scipy require us to remember the expression of the function we wanted to use.
Step21: Conclusion
|
7,633
|
<ASSISTANT_TASK:>
Python Code:
df['Count'].plot.box()
df.groupby('Year')['Count'].sum().sort_values(ascending=False)
df.groupby('Year')['Count'].sum().mean()
fig, ax = plt.subplots(figsize=(9, 6))
df.groupby('Year')['Count'].sum().plot.barh()
mean = df.groupby('Year')['Count'].sum().mean()
ax.plot([mean, mean], [0, 12], c='blue', linestyle="-", linewidth=0.5)
ax.annotate(s="Mean of death registered, 190,629.6", xy=(120000,0), color='Blue')
df.groupby('Sex')['Count'].sum()
df.groupby(['Year', 'Sex'])['Count'].sum()
# Bar chart of total deaths grouped by (Year, Sex).
fig, ax = plt.subplots(figsize=(9, 7))
df.groupby(['Year', 'Sex'])['Count'].sum().plot(color=['darkred', 'blue'], kind='bar', title="deaths by gender over time")
ax.set_ylabel('total deaths')
# Bug fix: this was a second set_ylabel, which overwrote 'total deaths';
# the x-axis label was clearly intended here.
ax.set_xlabel('Sex')
ax.set_ylim((0, 110000))
df.groupby(['Cause of Death', 'Sex'])['Count'].sum().sort_values(ascending=False).head(6)
df.groupby(['Cause of Death', 'Sex'])['Count'].sum().sort_values(ascending=False)
#Disease of heart is the number 1
df.groupby('Cause of Death')['Count'].sum().sort_values(ascending=False).head(1)
df.groupby('Cause of Death')['Count'].sum().sort_values(ascending=False).head(10)
fig, ax = plt.subplots(figsize=(9, 7))
df.groupby('Cause of Death')['Count'].sum().sort_values(ascending=True).plot.barh()
ax.set_xlim((0,400000))
df.groupby('Ethnicity')['Count'].sum().sort_values(ascending=False)
df.groupby('Ethnicity')['Count'].sum().plot.barh(color=['Black', 'Black', 'Black', 'darkred'])
fig, ax = plt.subplots(figsize=(9, 7))
only_whites = df[df['Ethnicity'] == 'NON-HISPANIC WHITE']
only_whites.groupby('Cause of Death')['Count'].sum().sort_values(ascending=True).plot.barh(color='blue')
fig, ax = plt.subplots(figsize=(9, 7))
only_blacks = df[df['Ethnicity'] == 'NON-HISPANIC BLACK']
only_blacks.groupby('Cause of Death')['Count'].sum().sort_values(ascending=True).plot.barh(color='black')
fig, ax = plt.subplots(figsize=(9, 7))
only_hispanic = df[df['Ethnicity'] == 'HISPANIC']
only_hispanic.groupby('Cause of Death')['Count'].sum().sort_values(ascending=True).plot.barh(color='brown')
only_asians = df[df['Ethnicity'] == 'ASIAN & PACIFIC ISLANDER']
fig, ax = plt.subplots(figsize=(9, 7))
only_asians.groupby('Cause of Death')['Count'].sum().sort_values(ascending=True).plot.barh(color='green')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. In what year did New Yorkers die the most?
Step2: 2. Who is more likely to die, a male New Yorker or a female New Yorker?
Step3: 3. Is Sex (in)difference in the data the same for all the years available?
Step4: 4. Which are the most common diseases for men and women? Is there a difference?
Step5: 5. What about other diseases? Are there any sex differences?
Step6: 6. What is the leading cause of death?
Step7: 7. What other diseases are frequent in New Yorkers?
Step8: Heart disease and cancer are by far the most common causes of death!
Step9: 8. Which ethnicity is overall more vulnerable to deseases in NYC?
Step10: 9. Is distribution between ethnicities the same? What about white newyorkers?
Step11: 10. Same for african americans?
Step12: 11. What about spanish and asians?
Step13: Asians are by far the most distinct population with respect to causes of death, with the highest incidence of death from cancer.
|
7,634
|
<ASSISTANT_TASK:>
Python Code:
from floweaver import *
dataset = Dataset.from_csv('us-energy-consumption.csv',
dim_process_filename='us-energy-consumption-processes.csv')
sources = ['Solar', 'Nuclear', 'Hydro', 'Wind', 'Geothermal',
'Natural_Gas', 'Coal', 'Biomass', 'Petroleum']
uses = ['Residential', 'Commercial', 'Industrial', 'Transportation']
nodes = {
'sources': ProcessGroup('type == "source"', Partition.Simple('process', sources), title='Sources'),
'imports': ProcessGroup(['Net_Electricity_Import'], title='Net electricity imports'),
'electricity': ProcessGroup(['Electricity_Generation'], title='Electricity Generation'),
'uses': ProcessGroup('type == "use"', partition=Partition.Simple('process', uses)),
'energy_services': ProcessGroup(['Energy_Services'], title='Energy services'),
'rejected': ProcessGroup(['Rejected_Energy'], title='Rejected energy'),
'direct_use': Waypoint(Partition.Simple('source', [
# This is a hack to hide the labels of the partition, there should be a better way...
(' '*i, [k]) for i, k in enumerate(sources)
])),
}
ordering = [
[[], ['sources'], []],
[['imports'], ['electricity', 'direct_use'], []],
[[], ['uses'], []],
[[], ['rejected', 'energy_services'], []]
]
bundles = [
Bundle('sources', 'electricity'),
Bundle('sources', 'uses', waypoints=['direct_use']),
Bundle('electricity', 'uses'),
Bundle('imports', 'uses'),
Bundle('uses', 'energy_services'),
Bundle('uses', 'rejected'),
Bundle('electricity', 'rejected'),
]
palette = {
'Solar': 'gold',
'Nuclear': 'red',
'Hydro': 'blue',
'Wind': 'purple',
'Geothermal': 'brown',
'Natural_Gas': 'steelblue',
'Coal': 'black',
'Biomass': 'lightgreen',
'Petroleum': 'green',
'Electricity': 'orange',
'Rejected energy': 'lightgrey',
'Energy services': 'dimgrey',
}
sdd = SankeyDefinition(nodes, bundles, ordering,
flow_partition=dataset.partition('type'))
weave(sdd, dataset, palette=palette) \
.to_widget(width=700, height=450, margins=dict(left=100, right=120), debugging=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the dataset
Step2: This defines the order the nodes appear in
Step3: Now define the Sankey diagram definition.
Step4: Define the colours to roughly imitate the original Sankey diagram
Step5: And here's the result!
|
7,635
|
<ASSISTANT_TASK:>
Python Code:
seconds_in_a_day = 24 * 60 * 60
seconds_in_a_day
seconds_in_a_week = 7 * seconds_in_a_day
seconds_in_a_week
import numpy as np
from matplotlib import pyplot as plt
ys = 200 + np.random.randn(100)
x = [x for x in range(len(ys))]
plt.plot(x, ys, '-')
plt.fill_between(x, ys, 195, where=(ys > 195), facecolor='g', alpha=0.6)
plt.title("Sample Visualization")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To execute the code in the above cell, select it with a click and then either press the play button to the left of the code, or use the keyboard shortcut "Command/Ctrl+Enter". To edit the code, just click the cell and start editing.
Step2: Colab notebooks allow you to combine executable code and rich text in a single document, along with images, HTML, LaTeX and more. When you create your own Colab notebooks, they are stored in your Google Drive account. You can easily share your Colab notebooks with co-workers or friends, allowing them to comment on your notebooks or even edit them. To learn more, see Overview of Colab. To create a new Colab notebook you can use the File menu above, or use the following link
|
7,636
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from sklearn import tree
X = [[0, 0], [1, 2]]
y = [0, 1]
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X, y)
clf.predict([[2., 2.]])
clf.predict_proba([[2. , 2.]])
clf.predict([[0.4, 1.2]])
clf.predict_proba([[0.4, 1.2]])
clf.predict_proba([[0, 0.2]])
from sklearn.datasets import load_iris
from sklearn import tree
iris = load_iris()
iris.data[0:5]
iris.feature_names
X = iris.data[:, 2:]
y = iris.target
y
clf = tree.DecisionTreeClassifier(random_state=42)
clf = clf.fit(X, y)
from sklearn.tree import export_graphviz
export_graphviz(clf,
out_file="tree.dot",
feature_names=iris.feature_names[2:],
class_names=iris.target_names,
rounded=True,
filled=True)
import graphviz
dot_data = tree.export_graphviz(clf, out_file=None,
feature_names=iris.feature_names[2:],
class_names=iris.target_names,
rounded=True,
filled=True)
graph = graphviz.Source(dot_data)
import numpy as np
import seaborn as sns
sns.set_style('whitegrid')
import matplotlib.pyplot as plt
%matplotlib inline
df = sns.load_dataset('iris')
df.head()
col = ['petal_length', 'petal_width']
X = df.loc[:, col]
species_to_num = {'setosa': 0,
'versicolor': 1,
'virginica': 2}
df['tmp'] = df['species'].map(species_to_num)
y = df['tmp']
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X, y)
X[0:5]
X.values
X.values.reshape(-1,1)
Xv = X.values.reshape(-1,1)
Xv
h = 0.02 # set the spacing
Xv.min()
Xv.max() + 1
x_min, x_max = Xv.min(), Xv.max() + 1
y.min()
y.max() + 1
y_min, y_max = y.min(), y.max() + 1
y_min
y_max
np.arange(x_min, x_max, h)
np.arange(y_min, y_max, h)
np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
xx
yy
xx.ravel()
xx.ravel?
yy.ravel()
np.c_[xx.ravel(), yy.ravel()]
np.c_?
pd.DataFrame(np.c_[xx.ravel(), yy.ravel()])
z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
z
xx.shape
z.shape
z = z.reshape(xx.shape)
z.shape
plt.contourf?
fig = plt.figure(figsize=(16,10))
ax = plt.contourf(xx, yy, z, cmap = 'afmhot', alpha=0.3);
fig = plt.figure(figsize=(16,10))
plt.scatter(X.values[:, 0], X.values[:, 1], c=y, s=80,
alpha=0.9, edgecolors='g');
fig = plt.figure(figsize=(16,10))
ax = plt.contourf(xx, yy, z, cmap = 'afmhot', alpha=0.3);
plt.scatter(X.values[:, 0], X.values[:, 1], c=y, s=80,
alpha=0.9, edgecolors='g');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: DecisionTreeClassifier is capable of both binary (where the labels are [-1, 1]) classification and multiclass (where the labels are [0, …, K-1]) classification.
Step2: Start Here
Step3: matplotlib documentation
|
7,637
|
<ASSISTANT_TASK:>
Python Code:
def check_python_version():
print 'Python version:\n', sys.version
assert sys.version_info < (3,0)
check_python_version()
def improved_check_python_version():
print 'Python version:\n', sys.version
try:
assert sys.version_info < (3,0)
except:
raise AssertionError('Incompatible version of Python: use Python version < 3.0')
improved_check_python_version()
def test_type_float(var):
if not isinstance(var, float):
raise TypeError('Expected input type == float')
f = 1.
# Since the input type is a float, no error is raised.
test_type_float(f)
i = 1
# Since the input type is a list, an error is raised.
test_type_float(i)
def incorrect_divide_by_two(var):
return var / 2
print incorrect_divide_by_two(f)
print incorrect_divide_by_two(i)
def correct_divide_by_two(var):
'''
Divides input by two.
INPUT
var : float
'''
test_type_float(var)
return var / 2
correct_divide_by_two?
print correct_divide_by_two(f)
print correct_divide_by_two(i)
def divide_by_two(var):
if isinstance(var, int):
warnings.warn('Performing floor division. Input type == int', Warning)
return var / 2
divide_by_two(1)
divide_by_two(np.array([1]))
def divide_by_two(var):
if isinstance(var, int):
warnings.warn('Performing floor division. Input type == int', Warning)
if isinstance(var, np.ndarray):
if var.dtype == int:
warnings.warn('Performing floor division. numpy.dtype == int', Warning)
return var / 2
divide_by_two(np.array([1]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Raising errors
Step2: Raising warnings
|
7,638
|
<ASSISTANT_TASK:>
Python Code:
# Generate data
import numpy as np
import pandas as pd
raw_data = np.array([0] * 30 + [1] * 70) # Representing heads by 0 and tails by 1
data = pd.DataFrame(raw_data, columns=['coin'])
print(data)
# Defining the Bayesian Model
from pgmpy.models import BayesianModel
from pgmpy.estimators import MaximumLikelihoodEstimator, BayesianEstimator
model = BayesianModel()
model.add_node('coin')
# Fitting the data to the model using Maximum Likelihood Estimator
model.fit(data, estimator_type=MaximumLikelihoodEstimator)
print(model.get_cpds('coin'))
# Fitting the data to the model using Bayesian Estimator with Dirichlet prior with equal pseudo counts.
model.fit(data, estimator_type=BayesianEstimator, prior_type='dirichlet', pseudo_counts={'coin': [50, 50]})
print(model.get_cpds('coin'))
# Generating radom data with each variable have 2 states and equal probabilities for each state
import numpy as np
import pandas as pd
raw_data = np.random.randint(low=0, high=2, size=(1000, 5))
data = pd.DataFrame(raw_data, columns=['D', 'I', 'G', 'L', 'S'])
print(data)
# Defining the model
from pgmpy.models import BayesianModel
from pgmpy.estimators import MaximumLikelihoodEstimator, BayesianEstimator
model = BayesianModel([('D', 'G'), ('I', 'G'), ('I', 'S'), ('G', 'L')])
# Learing CPDs using Maximum Likelihood Estimators
model.fit(data, estimator_type=MaximumLikelihoodEstimator)
for cpd in model.get_cpds():
print("CPD of {variable}:".format(variable=cpd.variable))
print(cpd)
# Learning with Bayesian Estimator using dirichlet prior for each variable.
pseudo_counts = {'D': [300, 700], 'I': [500, 500], 'G': [800, 200], 'L': [500, 500], 'S': [400, 600]}
model.fit(data, estimator_type=BayesianEstimator, prior_type='dirichlet', pseudo_counts=pseudo_counts)
for cpd in model.get_cpds():
print("CPD of {variable}:".format(variable=cpd.variable))
print(cpd)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can see that we get the results as expected. In the maximum likelihood case we got the probability just based on the data where as in the bayesian case we had a prior of $ P(H) = 0.5 $ and $ P(T) = 0.5 $, therefore with 30% heads and 70% tails in the data we got a posterior of $ P(H) = 0.4 $ and $ P(T) = 0.6 $.
Step2: As the data was randomly generated with equal probabilities for each state we can see here that all the probability values are close to 0.5 which we expected. Now coming to the Bayesian Estimator
|
7,639
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from keras.utils import np_utils
from keras.datasets import mnist
# for Multi-layer Perceptron (MLP) model
from keras.models import Sequential
from keras.layers import Dense
# for Convolutional Neural Network (CNN) model
from keras.layers import Dropout, Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
# fix for issue: https://github.com/fchollet/keras/issues/2681
from keras import backend as K
K.set_image_dim_ordering('th')
(X_train, y_train), (X_test, y_test) = mnist.load_data()
plt.figure(figsize=[20,8])
for i in range(6):
plt.subplot(1,6,i+1)
#plt.imshow(X_train[i])
plt.imshow(X_train[i], cmap='gray', interpolation='none')
plt.title("Class {}".format(y_train[i]))
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
print (X_train.shape)
print (y_train.shape)
# print first train image values
# it contains a matrix of 28 rows and 28 cols
print (X_train[0])
# flatten 28*28 images to a 784 vector for each image
num_pixels = X_train.shape[1] * X_train.shape[2]
X_train = X_train.reshape(X_train.shape[0], num_pixels).astype('float32')
X_test = X_test.reshape(X_test.shape[0], num_pixels).astype('float32')
print (num_pixels, X_train.shape, X_test.shape)
print (X_train[1])
# pixel values are gray scale between 0 and 255
# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
print (X_train[1])
print (y_train.shape)
print (y_train[0])
# one hot encode outputs
# note that we have new variables with capital Y
# Y_train is different than y_train
Y_train = np_utils.to_categorical(y_train)
Y_test = np_utils.to_categorical(y_test)
num_classes = Y_test.shape[1]
print (y_train.shape, Y_train.shape)
print (y_train[0], Y_train[0])
def baseline_model():
    """Build and compile a single-hidden-layer MLP for MNIST classification.

    Uses the file-level ``num_pixels`` (input width) and ``num_classes``
    (output width) globals. Returns the compiled Keras model.
    """
    # One ReLU hidden layer sized like the flattened image, followed by a
    # softmax output layer with one unit per digit class.
    hidden = Dense(num_pixels, input_dim=num_pixels, kernel_initializer='normal', activation='relu')
    output = Dense(num_classes, kernel_initializer='normal', activation='softmax')
    network = Sequential()
    for layer in (hidden, output):
        network.add(layer)
    # Categorical cross-entropy with Adam; report accuracy while training.
    network.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return network
model = baseline_model()
model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=5, batch_size=200, verbose=1)
model.summary()
scores = model.evaluate(X_test, Y_test, verbose=0)
print (scores)
print ('Score: {}'.format(scores[0]))
print ('Accuracy: {}'.format(scores[1]))
# get predicted values
predicted_classes = model.predict_classes(X_test)
# get index list of all correctly predicted values
correct_indices = np.nonzero(np.equal(predicted_classes, y_test))[0]
# get index list of all incorrectly predicted values
incorrect_indices = np.nonzero(np.not_equal(predicted_classes, y_test))[0]
print ('Correctly predicted: %i' % np.size(correct_indices))
print ('Incorrectly predicted: %i' % np.size(incorrect_indices))
plt.figure(figsize=[20,8])
for i, correct in enumerate(correct_indices[:6]):
plt.subplot(1,6,i+1)
plt.imshow(X_test[correct].reshape(28,28), cmap='gray', interpolation='none')
plt.title("Predicted {}, Class {}".format(predicted_classes[correct], y_test[correct]))
plt.figure(figsize=[20,8])
for i, incorrect in enumerate(incorrect_indices[:6]):
plt.subplot(1,6,i+1)
plt.imshow(X_test[incorrect].reshape(28,28), cmap='gray', interpolation='none')
plt.title("Predicted {}, Class {}".format(predicted_classes[incorrect], y_test[incorrect]))
from sklearn.metrics import confusion_matrix
import seaborn as sns
sns.set() # setting seaborn default for plots
class_names = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, predicted_classes)
np.set_printoptions(precision=2)
print ('Confusion Matrix in Numbers')
print (cnf_matrix)
print ('')
cnf_matrix_percent = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
print ('Confusion Matrix in Percentage')
print (cnf_matrix_percent)
print ('')
true_class_names = class_names
predicted_class_names = class_names
df_cnf_matrix = pd.DataFrame(cnf_matrix,
index = true_class_names,
columns = predicted_class_names)
df_cnf_matrix_percent = pd.DataFrame(cnf_matrix_percent,
index = true_class_names,
columns = predicted_class_names)
plt.figure(figsize = (8,6))
#plt.subplot(121)
ax = sns.heatmap(df_cnf_matrix, annot=True, fmt='d')
ax.set_ylabel('True values')
ax.set_xlabel('Predicted values')
ax.set_title('Confusion Matrix in Numbers')
'''
plt.subplot(122)
ax = sns.heatmap(df_cnf_matrix_percent, annot=True)
ax.set_ylabel('True values')
ax.set_xlabel('Predicted values')
'''
# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape to be [samples][pixels][width][height]
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')
print (num_pixels, X_train.shape, X_test.shape)
print (X_train[1])
# pixel values are gray scale between 0 and 255
# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
print (X_train[1])
print (y_train.shape)
print (y_train[0])
# one hot encode outputs
# note that we have new variables with capital Y
# Y_train is different than y_train
Y_train = np_utils.to_categorical(y_train)
Y_test = np_utils.to_categorical(y_test)
num_classes = Y_test.shape[1]
print (y_train.shape, Y_train.shape)
print (y_train[0], Y_train[0])
# baseline model for CNN
def baseline_model():
    """Build and compile a small convolutional network for MNIST digits.

    Expects channels-first ``(1, 28, 28)`` input and uses the file-level
    ``num_classes`` global for the output width. Returns the compiled model.
    """
    layers = [
        # Feature extraction: one 5x5 convolution over the 1x28x28 input,
        # 2x2 max-pooled, with dropout for regularization.
        Conv2D(32, (5, 5), input_shape=(1, 28, 28), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.2),
        # Classification head: flatten, one dense ReLU layer, softmax output.
        Flatten(),
        Dense(128, activation='relu'),
        Dense(num_classes, activation='softmax'),
    ]
    cnn = Sequential()
    for layer in layers:
        cnn.add(layer)
    cnn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return cnn
model = baseline_model()
model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=5, batch_size=200, verbose=1)
model.summary()
scores = model.evaluate(X_test, Y_test, verbose=0)
print (scores)
print ('Score: {}'.format(scores[0]))
print ('Accuracy: {}'.format(scores[1]))
# get predicted values
predicted_classes = model.predict_classes(X_test)
# get index list of all correctly predicted values
correct_indices = np.nonzero(np.equal(predicted_classes, y_test))[0]
# get index list of all incorrectly predicted values
incorrect_indices = np.nonzero(np.not_equal(predicted_classes, y_test))[0]
print ('Correctly predicted: %i' % np.size(correct_indices))
print ('Incorrectly predicted: %i' % np.size(incorrect_indices))
plt.figure(figsize=[20,8])
for i, correct in enumerate(correct_indices[:6]):
plt.subplot(1,6,i+1)
plt.imshow(X_test[correct].reshape(28,28), cmap='gray', interpolation='none')
plt.title("Predicted {}, Class {}".format(predicted_classes[correct], y_test[correct]))
plt.figure(figsize=[20,8])
for i, incorrect in enumerate(incorrect_indices[:6]):
plt.subplot(1,6,i+1)
plt.imshow(X_test[incorrect].reshape(28,28), cmap='gray', interpolation='none')
plt.title("Predicted {}, Class {}".format(predicted_classes[incorrect], y_test[incorrect]))
from sklearn.metrics import confusion_matrix
import seaborn as sns
sns.set() # setting seaborn default for plots
class_names = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, predicted_classes)
np.set_printoptions(precision=2)
print ('Confusion Matrix in Numbers')
print (cnf_matrix)
print ('')
cnf_matrix_percent = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
print ('Confusion Matrix in Percentage')
print (cnf_matrix_percent)
print ('')
true_class_names = class_names
predicted_class_names = class_names
df_cnf_matrix = pd.DataFrame(cnf_matrix,
index = true_class_names,
columns = predicted_class_names)
df_cnf_matrix_percent = pd.DataFrame(cnf_matrix_percent,
index = true_class_names,
columns = predicted_class_names)
plt.figure(figsize = (8,6))
#plt.subplot(121)
ax = sns.heatmap(df_cnf_matrix, annot=True, fmt='d')
ax.set_ylabel('True values')
ax.set_xlabel('Predicted values')
ax.set_title('Confusion Matrix in Numbers')
'''
plt.subplot(122)
ax = sns.heatmap(df_cnf_matrix_percent, annot=True)
ax.set_ylabel('True values')
ax.set_xlabel('Predicted values')
'''
submissions = pd.DataFrame({'ImageId':list(range(1,len(predicted_classes) + 1)), "Label": predicted_classes})
#submissions.to_csv("submission.csv", index=False, header=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading Data
Step2: Plotting images and their class values
Step3: Viewing shape and content of data
Step4: Flattening images
Step5: Normalizing input values
Step6: Converting target variable values into one-hot format
Step7: Define Simple Perceptron Model
Step8: Fit and Evaluate Model
Step9: Plot correctly and incorrectly predicted images
Step10: Confusion Matrix
Step11: The above confusion matrix heatmap shows that
Step12: Reshaping images
Step13: Normalizing input values
Step14: Converting target variable values into one-hot format
Step15: Define Convolutional Neural Network (CNN) Model
Step16: Fit and Evaluate Model
Step17: Accuracy (98.75%) of Convolution Neural Network (CNN) model has improved as compared to the accuracy (97.91%) of Multi-layer Perceptron (MLP) model.
Step18: Confusion Matrix
Step19: Using Multi-layer Perceptron (MLP) Model, we had the following heatmap outcome
|
7,640
|
<ASSISTANT_TASK:>
Python Code:
def f(t, x):
    """Right-hand side of the equations of motion for a two-link (double
    pendulum / 2R robot) system, in first-order state-space form.

    This is an exercise template: the mass-matrix assembly and the solve for
    the joint accelerations are left for the student to implement (the two
    ``raise NotImplementedError`` lines below). Until implemented, calling
    this function raises NotImplementedError.

    Parameters
    ----------
    t : float
        Current time (unused by this autonomous system, but required by the
        ODE-solver/simulator interface).
    x : sequence of 4 floats
        State vector ``[q1, q2, q̇1, q̇2]``: the two joint angles followed by
        the two joint velocities.

    Returns
    -------
    list of 4 floats
        The state derivative ``[q̇1, q̇2, q̈1, q̈2]``.
    """
    # Import the required mathematical functions
    from numpy import matrix, sin, cos
    # Unpack the variables that make up the state
    q1, q2, q̇1, q̇2 = x
    # Define the system constants
    g = 9.81                                     # gravitational acceleration [m/s^2]
    m1, m2, J1, J2 = 0.3, 0.2, 0.0005, 0.0002    # link masses [kg] and inertias [kg·m^2]
    l1, l2 = 0.4, 0.3                            # link lengths [m]
    τ1, τ2 = 0, 0                                # joint torques (unactuated here)
    # Group terms into column vectors
    q̇ = matrix([[q̇1], [q̇2]])
    τ = matrix([[τ1], [τ2]])
    # Precompute common terms
    μ1 = m2*l2**2
    μ2 = m2*l1*l2
    c1 = cos(q1)
    c2 = cos(q2)
    s2 = sin(q2)
    c12 = cos(q1 + q2)
    # Compute the matrices of the equation of motion
    # WRITE YOUR CODE HERE
    raise NotImplementedError
    # Compute the values to be returned by the system
    # WRITE YOUR CODE HERE
    raise NotImplementedError
    # NOTE: the student's code above is expected to define q1p, q2p and qpp
    # (the joint-acceleration vector) before these lines are reached.
    q1pp = qpp.item(0)
    q2pp = qpp.item(1)
    # Return the derivative of the input variables
    return [q1p, q2p, q1pp, q2pp]
from numpy.testing import assert_almost_equal
assert_almost_equal(f(0, [0, 0, 0, 0]), [0,0,-1392.38, 3196.16], 2)
assert_almost_equal(f(0, [1, 1, 0, 0]), [0,0,-53.07, 104.34], 2)
print("Sin errores")
from robots.simuladores import simulador
%matplotlib widget
ts, xs = simulador(puerto_zmq="5551", f=f, x0=[0, 0, 0, 0], dt=0.02)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Mandamos llamar al simulador
|
7,641
|
<ASSISTANT_TASK:>
Python Code:
import numpy
import chaospy
distribution = chaospy.Uniform(0, 15)
samples = distribution.sample(10, rule="sobol")
evaluations = samples*numpy.sin(samples)
evaluations.round(4)
from matplotlib import pyplot
expansion = chaospy.generate_expansion(9, distribution, normed=True)
t = numpy.linspace(0, 15, 200)
pyplot.rc("figure", figsize=[15, 6])
pyplot.plot(t, expansion(t).T)
pyplot.axis([0, 15, -3, 3])
pyplot.show()
from sklearn.linear_model import LarsCV
lars = LarsCV(fit_intercept=False, max_iter=5)
pce, coeffs = chaospy.fit_regression(
expansion, samples, evaluations, model=lars, retall=True)
expansion_ = expansion[coeffs != 0]
pce.round(2)
lars = LarsCV(fit_intercept=False, max_iter=5)
lars.fit(expansion(samples).T, evaluations)
expansion_ = expansion[lars.coef_ != 0]
lars.coef_.round(4)
print("number of expansion terms total:", len(expansion))
print("number of expansion terms included:", len(expansion_))
import gstools
model = gstools.Gaussian(dim=1, var=1)
pck = gstools.krige.Universal(model, samples, evaluations, list(expansion_))
pck(samples)
assert numpy.allclose(pck.field, evaluations)
uk = gstools.krige.Universal(model, samples, evaluations, "linear")
uk(samples)
assert numpy.allclose(uk.field, evaluations)
pck(t)
mu, sigma = pck.field, numpy.sqrt(pck.krige_var)
pyplot.plot(t, mu, label="pck")
pyplot.fill_between(t, mu-sigma, mu+sigma, alpha=0.4)
uk(t)
mu, sigma = uk.field, numpy.sqrt(uk.krige_var)
pyplot.plot(t, mu, label="uk")
pyplot.fill_between(t, mu-sigma, mu+sigma, alpha=0.4)
pyplot.plot(t, pce(t), label="pce")
pyplot.scatter(samples, evaluations, color="k", label="samples")
pyplot.axis([0, 15, -12, 15])
pyplot.legend(loc="upper left")
pyplot.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The goal is to create a so called "polynomial chaos kriging" model as defined in the paper with the same name. We are going to do this using the following steps
Step2: As chaospy does not support least angular regression, we use the scikit-learn implementation.
Step3: Note that the same coefficients can be created from the lars model directly, but that does not yield a fitted expansion
Step4: This resulted in a reduction of the number of polynomials
Step5: With the number of polynomials reduced, we can create our kriging model.
Step6: For reference, we also create a more traditional universal kriging model with linear drift.
Step7: Lastly we visually compare the models by plotting the mean and standard deviations against each other
|
7,642
|
<ASSISTANT_TASK:>
Python Code:
# ES-DOC notebook boilerplate for the CMIP6 sea-ice realm of the
# HAMMOZ-consortium "sandbox-3" model. NotebookOutput collects the
# property values entered in the cells below.
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
# Arguments are (mip_era, institute, source_id, topic).
DOC = NotebookOutput('cmip6', 'hammoz-consortium', 'sandbox-3', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
# 0 keeps the document unpublished until the values are filled in.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
7,643
|
<ASSISTANT_TASK:>
Python Code:
#Version 1
def main():
    """Prompt for name and age, then print N times the year the user
    turns 100.

    NOTE(review): version 1 hard-codes the current year as 2017, so the
    result goes stale; `name` and the loop index `i` are collected but
    never used. The later versions below address these issues.
    """
    name, age = input("Enter name") , int( input("Enter age"))
    for i in range( int(input("Enter a number"))):
        print("You will turn 100 in {}".format(2017-age+100))
# Run version 1.
main()
# Exploratory check: str(date.today()) is 'YYYY-MM-DD', so splitting on
# '-' yields [year, month, day] as strings.
from datetime import date
today = str(date.today()).split("-")
print(type(today))  # list
print(today[0])  # the year component (still a string here)
#Version 2
from datetime import date
def main():
    """Prompt for name and age, then print N times the year the user
    turns 100, using the current year from find_years().

    NOTE(review): `name` and the loop index `i` are still unused here;
    fixed in the later versions below.
    """
    name, age = input("Enter name") , int( input("Enter age"))
    thisYear = find_years()
    for i in range( int(input("Enter a number"))):
        print("You will turn 100 in {}".format(thisYear-age+100))
def find_years():
    """Return the current calendar year as an int."""
    # date.today().year is already an int; stringifying the date and
    # splitting on '-' was a roundabout way to obtain the same value.
    return date.today().year
main()
from datetime import date
def main():
    """Ask for name and age, report the year the user turns 100, then
    repeat the message a user-chosen number of times.

    Relies on the sibling helpers get_name(), get_age() and
    find_years() defined in this file.
    """
    name, age = get_name() , get_age()
    thisYear = find_years()
    # Compute the milestone year once; the expression is loop-invariant
    # and was previously duplicated in both print calls.
    turns100Year = thisYear - age + 100
    print("Hi {} ! You will turn 100 in {} \n".format(name, turns100Year))
    # `_` signals the loop index is deliberately unused.
    for _ in range( int(input("Please enter how many times the above message should show : \n"))):
        print("Hi {} ! You will turn 100 in {}".format(name, turns100Year))
def get_name():
    # Prompt for and return the user's name (no validation applied).
    return input("Please enter your name : \n")
def get_age():
    # Prompt for the user's age; int() raises ValueError on
    # non-numeric input.
    return int(input("Please enter your age : \n"))
def find_years():
    """Return the current calendar year as an int."""
    # Simpler and type-safe compared to parsing str(date.today()).
    return date.today().year
if __name__ == '__main__':
    main()
from datetime import date
def main():
    """Ask for name and age, report the year the user turns 100, and
    repeat the message a user-chosen number of times.

    Relies on the sibling helpers get_name(), get_age(), find_years(),
    calculate_year() and show_msg() defined in this file.
    """
    name = get_name()
    age = get_age()
    thisYear = find_years()
    turns100Year = calculate_year(age,thisYear)
    show_msg(name,turns100Year)
    # Bug fix: reuse the already-computed turns100Year instead of
    # re-deriving it with `thisYear-age+100` in the loop — same value,
    # one source of truth (calculate_year).
    for _ in range( int(input("Please enter how many times the above message should show : \n"))):
        print("Hi {} ! You will turn 100 in {}".format(name, turns100Year))
def get_name():
    # Prompt for and return the user's name (no validation applied).
    return input("Please enter your name : \n")
def get_age():
    # Prompt for the user's age; int() raises ValueError on
    # non-numeric input.
    return int(input("Please enter your age : \n"))
def find_years():
    """Return the current calendar year as an int."""
    # date.today().year is already an int; no stringify-and-split needed.
    return date.today().year
def show_msg(name, turns100Year):
    """Print the greeting announcing the year *name* turns 100."""
    message = "Hi {} ! You will turn 100 in {} \n".format(name, turns100Year)
    print(message)
def calculate_year(age,thisYear):
    """Return the calendar year in which someone aged *age* in
    *thisYear* turns 100.

    Raises:
        ValueError: if *age* is negative.
    """
    # Use a real exception instead of `assert`: asserts are stripped
    # under `python -O`, which would silently disable this validation.
    if age < 0:
        raise ValueError("Age is not allowed to be less than zero")
    return thisYear-age+100
if __name__ == '__main__':
    main()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using datetime module to find current date and extracting year from the current date
Step2: Use the above and modify our program
|
7,644
|
<ASSISTANT_TASK:>
Python Code:
def parse_fastq(fh):
    """Parse reads from a FASTQ filehandle.

    For each read, we return a (name, nucleotide-string,
    quality-string) triple.
    """
    # The docstring delimiters were lost, leaving bare prose lines that
    # were a syntax error; restored as a proper docstring above.
    reads = []
    while True:
        first_line = fh.readline()
        if len(first_line) == 0:
            break  # end of file
        name = first_line[1:].rstrip()  # drop the leading '@'
        seq = fh.readline().rstrip()
        fh.readline()  # ignore line starting with +
        qual = fh.readline().rstrip()
        reads.append((name, seq, qual))
    return reads
# Three example FASTQ records (4 lines each: @name, sequence, '+',
# Phred+33 quality string).
fastq_string = '''@ERR294379.100739024 HS24_09441:8:2203:17450:94030#42/1
AGGGAGTCCACAGCACAGTCCAGACTCCCACCAGTTCTGACGAAATGATG
+
BDDEEF?FGFFFHGFFHHGHGGHCH@GHHHGFAHEGFEHGEFGHCCGGGF
@ERR294379.136275489 HS24_09441:8:2311:1917:99340#42/1
CTTAAGTATTTTGAAAGTTAACATAAGTTATTCTCAGAGAGACTGCTTTT
+
@@AHFF?EEDEAF?FEEGEFD?GGFEFGECGE?9H?EEABFAG9@CDGGF
@ERR294379.97291341 HS24_09441:8:2201:10397:52549#42/1
GGCTGCCATCAGTGAGCAAGTAAGAATTTGCAGAAATTTATTAGCACACT
+
CDAF<FFDEHEFDDFEEFDGDFCHD=GHG<GEDHDGJFHEFFGEFEE@GH'''
# StringIO lets us treat the in-memory string like a filehandle.
from io import StringIO
parse_fastq(StringIO(fastq_string))
def phred33_to_q(qual):
    """Turn Phred+33 ASCII-encoded quality into Phred-scaled integer."""
    # Restored the stripped docstring delimiters (the bare prose line
    # was a syntax error).
    return ord(qual)-33
def q_to_phred33(Q):
    """Turn Phred-scaled integer into Phred+33 ASCII-encoded quality."""
    # Restored the stripped docstring delimiters.
    return chr(Q + 33)
def q_to_p(Q):
    """Turn Phred-scaled integer into error probability."""
    # p = 10^(-Q/10); restored the stripped docstring delimiters.
    return 10.0 ** (-0.1 * Q)
def p_to_q(p):
    """Turn error probability into Phred-scaled integer (rounded)."""
    # Q = -10 * log10(p); restored the stripped docstring delimiters.
    import math
    return int(round(-10.0 * math.log10(p)))
# Here are the examples I discussed above
# Convert Qs into ps
q_to_p(30), q_to_p(20), q_to_p(10)
p_to_q(0.00011) # note that result is rounded
q_to_phred33(30), q_to_phred33(20)
# Take the first read from the small example above
name, seq, qual = parse_fastq(StringIO(fastq_string))[0]
# Decode each Phred+33 quality character to Q, then to an error
# probability p, one value per base.
q_string = list(map(phred33_to_q, qual))
p_string = list(map(q_to_p, q_string))
print(q_string)
print(p_string)
# Let's just make a random genome of length 1K
import random
random.seed(637485)  # fixed seed so the simulated genome is reproducible
genome = ''.join([random.choice('ACGT') for _ in range(1000)])
genome
# The sequencer draws a fragment from the genome of length, say, 250
offset = random.randint(0, len(genome) - 250)
fragment = genome[offset:offset+250]
fragment
# Then it reads sequences from either end of the fragment
# (75 bp from each end of the 250 bp fragment).
end1, end2 = fragment[:75], fragment[-75:]
end1, end2
# And because of how the whole biochemical process works, the
# second end is always from the opposite strand from the first.
import string
# function for reverse-complementing
# Translation table mapping each base (upper/lower case) to its
# Watson-Crick complement.
_revcomp_trans = str.maketrans("ACGTacgt", "TGCAtgca")

def reverse_complement(s):
    """Return the reverse complement of DNA string *s*."""
    complemented = s.translate(_revcomp_trans)
    return complemented[::-1]
# The second end is reported on the opposite strand, so reverse-
# complement it to model the sequencer's output.
end2 = reverse_complement(end2)
end1, end2
def parse_paired_fastq(fh1, fh2):
    """Parse paired-end reads from a pair of FASTQ filehandles.

    For each pair, we return a name, the nucleotide string
    for the first end, the quality string for the first end,
    the nucleotide string for the second end, and the
    quality string for the second end.
    """
    # The docstring delimiters were lost, leaving bare prose lines that
    # were a syntax error; restored as a proper docstring above.
    reads = []
    while True:
        first_line_1, first_line_2 = fh1.readline(), fh2.readline()
        if len(first_line_1) == 0:
            break  # end of file
        name_1, name_2 = first_line_1[1:].rstrip(), first_line_2[1:].rstrip()
        seq_1, seq_2 = fh1.readline().rstrip(), fh2.readline().rstrip()
        fh1.readline()  # ignore line starting with +
        fh2.readline()  # ignore line starting with +
        qual_1, qual_2 = fh1.readline().rstrip(), fh2.readline().rstrip()
        reads.append(((name_1, seq_1, qual_1), (name_2, seq_2, qual_2)))
    return reads
# Paired-end example: the /1 ends live in one FASTQ, the matching /2
# ends (same order) in the other.
fastq_string1 = '''@509.6.64.20524.149722/1
AGCTCTGGTGACCCATGGGCAGCTGCTAGGGAGCCTTCTCTCCACCCTGA
+
HHHHHHHGHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHIIHHIHFHHF
@509.4.62.19231.2763/1
GTTGATAAGCAAGCATCTCATTTTGTGCATATACCTGGTCTTTCGTATTC
+
HHHHHHHHHHHHHHEHHHHHHHHHHHHHHHHHHHHHHHDHHHHHHGHGHH'''
fastq_string2 = '''@509.6.64.20524.149722/2
TAAGTCAGGATACTTTCCCATATCCCAGCCCTGCTCCNTCTTTAAATAAT
+
HHHHHHHHHHHHHHHHHHHH@HHFHHHEFHHHHHHFF#FFFFFFFHHHHH
@509.4.62.19231.2763/2
CTCTGCTGGTATGGTTGACGCCGGATTTGAGAATCAANAAGAGCTTACTA
+
HHHHHHHHHHHHHHHHHHEHEHHHFHGHHHHHHHH>@#@=44465HHHHH'''
# Parse both files in lockstep.
parse_paired_fastq(StringIO(fastq_string1), StringIO(fastq_string2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: FASTQ
Step6: The nucleotide string can sometimes contain the character "N". N essentially means "no confidence." The sequencer knows there's a nucleotide there but doesn't know whether it's an A, C, G or T.
Step7: To convert an entire string Phred33-encoded quality values into the corresponding Q or p values, I can do the following
Step8: You might wonder how the sequencer and its software can know the probability that a nucleotide is incorrected called. It can't; this number is just an estimate. To describe exactly how it's estimated is beyond the scope of this notebook; if you're interested, search for academic papers with "base calling" in the title. Here's a helpful video by Rafa Irizarry.
Step10: FASTQ can be used to store paired-end reads. Say we have 1000 paired-end reads. We should store them in a pair of FASTQ files. The first FASTQ file (say, reads_1.fq) would contain all of the first ends and the second FASTQ file (say, reads_2.fq) would contain all of the second ends. In both files, the ends would appear in corresponding order. That is, the first entry in reads_1.fq is paired with the first entry in reads_2.fq and so on.
|
7,645
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, SkewT
from metpy.units import units
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('may4_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed'), how='all'
).reset_index(drop=True)
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.wind_components(wind_speed, wind_dir)
fig = plt.figure(figsize=(9, 9))
add_metpy_logo(fig, 115, 100)
skew = SkewT(fig, rotation=45)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot.
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Calculate LCL height and plot as black dot. Because `p`'s first value is
# ~1000 mb and its last value is ~250 mb, the `0` index is selected for
# `p`, `T`, and `Td` to lift the parcel from the surface. If `p` was inverted,
# i.e. start from low value, 250 mb, to a high value, 1000 mb, the `-1` index
# should be selected.
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Calculate full parcel profile and add to plot as black line
prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
skew.plot(p, prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, prof)
skew.shade_cape(p, T, prof)
# An example of a slanted line at constant T -- in this case the 0
# isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Show the plot
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Upper air data can be obtained using the siphon package, but for this example we will use
Step2: We will pull the data out of the example dataset into individual variables and
Step3: Create a new figure. The dimensions here give a good aspect ratio.
|
7,646
|
<ASSISTANT_TASK:>
Python Code:
import time
import numpy as np
import tensorflow as tf
import utils
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(dataset_filename):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
urlretrieve(
'http://mattmahoney.net/dc/text8.zip',
dataset_filename,
pbar.hook)
if not isdir(dataset_folder_path):
with zipfile.ZipFile(dataset_filename) as zip_ref:
zip_ref.extractall(dataset_folder_path)
with open('data/text8') as f:
text = f.read()
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
## Your code here
train_words = # The final subsampled word list
def get_target(words, idx, window_size=5):
''' Get a list of words in a window around an index. '''
# Your code here
return
def get_batches(words, batch_size, window_size=5):
''' Create a generator of word batches as a tuple (inputs, targets) '''
n_batches = len(words)//batch_size
# only full batches
words = words[:n_batches*batch_size]
for idx in range(0, len(words), batch_size):
x, y = [], []
batch = words[idx:idx+batch_size]
for ii in range(len(batch)):
batch_x = batch[ii]
batch_y = get_target(batch, ii, window_size)
y.extend(batch_y)
x.extend([batch_x]*len(batch_y))
yield x, y
train_graph = tf.Graph()
with train_graph.as_default():
inputs =
labels =
n_vocab = len(int_to_vocab)
n_embedding = # Number of embedding features
with train_graph.as_default():
embedding = # create embedding weight matrix here
embed = # use tf.nn.embedding_lookup to get the hidden layer output
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
softmax_w = # create softmax weight matrix here
softmax_b = # create softmax biases here
# Calculate the loss using negative sampling
loss = tf.nn.sampled_softmax_loss
cost = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer().minimize(cost)
with train_graph.as_default():
## From Thushan Ganegedara's implementation
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100
# pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,
random.sample(range(1000,1000+valid_window), valid_size//2))
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist:
!mkdir checkpoints
epochs = 10
batch_size = 1000
window_size = 10
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
for e in range(1, epochs+1):
batches = get_batches(train_words, batch_size, window_size)
start = time.time()
for x, y in batches:
feed = {inputs: x,
labels: np.array(y)[:, None]}
train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
loss += train_loss
if iteration % 100 == 0:
end = time.time()
print("Epoch {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Avg. Training loss: {:.4f}".format(loss/100),
"{:.4f} sec/batch".format((end-start)/100))
loss = 0
start = time.time()
if iteration % 1000 == 0:
## From Thushan Ganegedara's implementation
# note that this is expensive (~20% slowdown if computed every 500 steps)
sim = similarity.eval()
for i in range(valid_size):
valid_word = int_to_vocab[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = int_to_vocab[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
iteration += 1
save_path = saver.save(sess, "checkpoints/text8.ckpt")
embed_mat = sess.run(normalized_embedding)
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
embed_mat = sess.run(embedding)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
Step2: Preprocessing
Step3: And here I'm creating dictionaries to convert words to integers and backwards, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.
Step4: Subsampling
Step5: Making batches
Step6: Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function by the way, helps save memory.
Step7: Building the graph
Step8: Embedding
Step9: Negative sampling
Step10: Validation
Step11: Training
Step12: Restore the trained network if you need to
Step13: Visualizing the word vectors
|
7,647
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.preprocessing import compute_proj_ecg
from mne_connectivity import envelope_correlation
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(tmax=150).resample(100).pick('meg')
ecg_proj, _ = compute_proj_ecg(raw, ch_name='MEG 0511') # No ECG chan
raw.add_proj(ecg_proj)
raw.apply_proj()
epochs = mne.make_fixed_length_epochs(raw, duration=30, preload=False)
event_related_plot = epochs.plot_image(picks=['MEG 1142'])
epochs.load_data().filter(l_freq=8, h_freq=12)
alpha_data = epochs.get_data()
corr_matrix = envelope_correlation(alpha_data).get_data()
print(corr_matrix.shape)
first_30 = corr_matrix[0]
last_30 = corr_matrix[-1]
corr_matrices = [first_30, last_30]
color_lims = np.percentile(np.array(corr_matrices), [5, 95])
titles = ['First 30 Seconds', 'Last 30 Seconds']
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.suptitle('Correlation Matrices from First 30 Seconds and Last 30 Seconds')
for ci, corr_matrix in enumerate(corr_matrices):
ax = axes[ci]
mpbl = ax.imshow(corr_matrix, clim=color_lims)
ax.set_xlabel(titles[ci])
fig.subplots_adjust(right=0.8)
cax = fig.add_axes([0.85, 0.2, 0.025, 0.6])
cbar = fig.colorbar(ax.images[0], cax=cax)
cbar.set_label('Correlation Coefficient')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For this tutorial we'll crop and resample the raw data to a manageable size
Step2: To create fixed length epochs, we simply call the function and provide it
Step3: Characteristics of Fixed Length Epochs
Step4: For information about creating epochs for event-related analyses, please see
Step5: If desired, separate correlation matrices for each epoch can be obtained.
Step6: Now we can plot correlation matrices. We'll compare the first and last
|
7,648
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import math
import matplotlib.pyplot as plt
%matplotlib inline
from vega_datasets import data
data.list_datasets()
from vega_datasets import local_data
local_data.list_datasets()
local_data.anscombe.description
df = local_data.anscombe()
df.head()
# TODO: put your code here
car_df = local_data.cars()
car_df.head()
car_df.describe()
car_df.hist()
# TODO: put your code here
import random
import time
import numpy as np
l_short_bar = 1
l_long_bar = 10
perceived_length_list = []
actual_length_list = []
mystery_length = random.uniform(1, 10) # generate a number between 1 and 10. this is the *actual* length.
plt.barh(np.arange(3), [l_short_bar, mystery_length, l_long_bar], align='center')
plt.yticks(np.arange(3), ('1', '?', '10'))
plt.xticks([]) # no hint!
np.arange(3)
def run_exp_once():
mystery_length = random.uniform(1, 10) # generate a number between 1 and 10.
plt.barh(np.arange(3), [l_short_bar, mystery_length, l_long_bar], height=0.5, align='center')
plt.yticks(np.arange(3), ('1', '?', '10'))
plt.xticks([]) # no hint!
plt.show()
try:
perceived_length_list.append( float(input()) )
except:
print("This should only fail in workflow. If you are running this in browser, this won't fail.")
pass
actual_length_list.append(mystery_length)
run_exp_once()
# TODO: Run your experiment many times here
plt.scatter(x=[1,5,10], y=[1,10, 5])
# TODO: put your code here
from scipy.optimize import curve_fit
def func(x, a, b, c):
return a * np.power(x, b) + c
x = np.arange(20) # [0,1,2,3, ..., 19]
y = np.power(x, 2) # [0,1,4,9, ... ]
popt, pcov = curve_fit(func, x, y)
print('{:.2f} x^{:.2f} + {:.2f}'.format(*popt))
# TODO: your code here
n1 = 0.005
n2 = 0.05
radius1 = np.sqrt(n1/np.pi) # area = pi * r * r
radius2 = np.sqrt(n2/np.pi)
random_radius = np.sqrt(n1*random.uniform(1,10)/np.pi)
plt.axis('equal')
plt.axis('off')
circ1 = plt.Circle( (0,0), radius1, clip_on=False )
circ2 = plt.Circle( (4*radius2,0), radius2, clip_on=False )
rand_circ = plt.Circle((2*radius2,0), random_radius, clip_on=False )
plt.gca().add_artist(circ1)
plt.gca().add_artist(circ2)
plt.gca().add_artist(rand_circ)
perceived_area_list = []
actual_area_list = []
def run_area_exp_once(n1=0.005, n2=0.05):
radius1 = np.sqrt(n1/np.pi) # area = pi * r * r
radius2 = np.sqrt(n2/np.pi)
mystery_number = random.uniform(1,10)
random_radius = np.sqrt(n1*mystery_number/math.pi)
plt.axis('equal')
plt.axis('off')
circ1 = plt.Circle( (0,0), radius1, clip_on=False )
circ2 = plt.Circle( (4*radius2,0), radius2, clip_on=False )
rand_circ = plt.Circle((2*radius2,0), random_radius, clip_on=False )
plt.gca().add_artist(circ1)
plt.gca().add_artist(circ2)
plt.gca().add_artist(rand_circ)
plt.show()
perceived_area_list.append( float(input()) )
actual_area_list.append(mystery_number)
# TODO: put your code here. You can use multiple cells.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Vega datasets
Step2: or you can work with only smaller, local datasets.
Step3: Ah, we have the anscombe data here! Let's see the description of the dataset.
Step4: Anscombe's quartet dataset
Step5: Q1
Step6: Some histograms with pandas
Step7: Pandas provides useful summary functions. It identifies numerical data columns and provides you with a table of summary statistics.
Step8: If you ask to draw a histogram, you get all of them.
Step9: Well this is too small. You can check out the documentation and change the size of the figure.
Step10: Your own psychophysics experiment!
Step11: Perception of length
Step12: Btw, np.arange is used to create a simple integer list [0, 1, 2].
Step13: Now let's define a function to perform the experiment once. When you run this function, it picks a random number between 1.0 and 10.0 and show the bar chart. Then it asks you to input your estimate of the length of the middle bar. It then saves that number to the perceived_length_list and the actual answer to the actual_length_list.
Step14: Now, run the experiment many times to gather your data. Check the two lists to make sure that you have the proper dataset. The length of the two lists should be the same.
Step15: Plotting the result
Step16: Q3
Step17: After plotting, let's fit the relation between actual and perceived lengths using a polynomial function. We can easily do it using curve_fit(f, x, y) in Scipy, which is to fit $x$ and $y$ using the function f. In our case, $f = a*x^b +c$. For instance, we can check whether this works by creating a fake dataset that follows the exact form
Step18: In order to plot the function to check the relationship between the actual and perceived lenghts, you can use two variables x and y to plot the relationship where x equals to a series of continuous numbers. For example, if your x axis ranges from 1 to 9 then the variable x could be equal to np.linspace(1, 10, 50). The variable y will contain the equation that you get from popt. For example, if you get equation 1.00 x^2.00 + 0.00 then the variable y would be equal to 1.0 * x**2.0 + 0.
Step19: Perception of area
Step20: Let's have two lists for this experiment.
Step21: And define a function for the experiment.
Step22: Q5
|
7,649
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'thu', 'sandbox-1', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
7,650
|
<ASSISTANT_TASK:>
Python Code:
import chaospy
uniform = chaospy.Uniform(0, 4)
chaospy.E(uniform)
samples = uniform.sample(1e7)
numpy.mean(samples)
q0 = chaospy.variable()
chaospy.E(q0**3-1, uniform)
q0, q1, q2 = chaospy.variable(3)
joint3 = chaospy.J(chaospy.Normal(0, 1), chaospy.Uniform(0, 2), chaospy.Normal(2, 2))
chaospy.E([q0, q1*q2], joint3)
chaospy.Corr([q0, q0*q2], joint3)
chaospy.E_cond([q0, q1*q2], q0, joint3)
chaospy.E_cond([q0, q1*q2], q1, joint3)
chaospy.E_cond([q0, q1*q2], [q1, q2], joint3)
chaospy.Sens_m(6*q0+3*q1+q2, joint3)
chaospy.Sens_m2(q0*q1+q1*q2, joint3)
chaospy.Sens_t(6*q0+3*q1+q2, joint3)
chaospy.Perc([q0, q1*q2], [25, 50, 75], joint3, sample=1000, seed=1234)
new_dist = chaospy.QoI_Dist(q0*q1+q2, joint3)
new_dist.sample(6, seed=1234).round(6)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Its empirical counterpart is the mean function
Step2: The operator can also be used on any polynomial, but would then require the distribution of interest as a second argument
Step3: In the multivariate case, the distribution and the polynomials needs to coincide politically.
Step4: Here q0, q1 and q2 correspond to chaospy.Normal(0, 1), chaospy.Uniform(0, 2) and chaospy.Normal(2, 2) respectively.
Step5: Conditional mean
Step6: Sensitivity analysis
Step7: There are no direct empirical counterparts to these functions, but it is possible to create schemes using for example Saltelli's method.
Step8: Note that the accuracy of this method is dependent on the number of samples.
|
7,651
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function, unicode_literals
[10.5, 5.2, 3.25, 7.0]
import numpy as np
video = np.array([10.5, 5.2, 3.25, 7.0])
video
video.size
video[2] # 3rd element
%matplotlib inline
import matplotlib.pyplot as plt
u = np.array([2, 5])
v = np.array([3, 1])
x_coords, y_coords = zip(u, v)
plt.scatter(x_coords, y_coords, color=["r","b"])
plt.axis([0, 9, 0, 6])
plt.grid()
plt.show()
def plot_vector2d(vector2d, origin=[0, 0], **options):
return plt.arrow(origin[0], origin[1], vector2d[0], vector2d[1],
head_width=0.2, head_length=0.3, length_includes_head=True,
**options)
plot_vector2d(u, color="r")
plot_vector2d(v, color="b")
plt.axis([0, 9, 0, 6])
plt.grid()
plt.show()
a = np.array([1, 2, 8])
b = np.array([5, 6, 3])
from mpl_toolkits.mplot3d import Axes3D
subplot3d = plt.subplot(111, projection='3d')
x_coords, y_coords, z_coords = zip(a,b)
subplot3d.scatter(x_coords, y_coords, z_coords)
subplot3d.set_zlim3d([0, 9])
plt.show()
def plot_vectors3d(ax, vectors3d, z0, **options):
for v in vectors3d:
x, y, z = v
ax.plot([x,x], [y,y], [z0, z], color="gray", linestyle='dotted', marker=".")
x_coords, y_coords, z_coords = zip(*vectors3d)
ax.scatter(x_coords, y_coords, z_coords, **options)
subplot3d = plt.subplot(111, projection='3d')
subplot3d.set_zlim([0, 9])
plot_vectors3d(subplot3d, [a,b], 0, color=("r","b"))
plt.show()
def vector_norm(vector):
squares = [element**2 for element in vector]
return sum(squares)**0.5
print("||", u, "|| =")
vector_norm(u)
import numpy.linalg as LA
LA.norm(u)
radius = LA.norm(u)
plt.gca().add_artist(plt.Circle((0,0), radius, color="#DDDDDD"))
plot_vector2d(u, color="red")
plt.axis([0, 8.7, 0, 6])
plt.grid()
plt.show()
print(" ", u)
print("+", v)
print("-"*10)
u + v
plot_vector2d(u, color="r")
plot_vector2d(v, color="b")
plot_vector2d(v, origin=u, color="b", linestyle="dotted")
plot_vector2d(u, origin=v, color="r", linestyle="dotted")
plot_vector2d(u+v, color="g")
plt.axis([0, 9, 0, 7])
plt.text(0.7, 3, "u", color="r", fontsize=18)
plt.text(4, 3, "u", color="r", fontsize=18)
plt.text(1.8, 0.2, "v", color="b", fontsize=18)
plt.text(3.1, 5.6, "v", color="b", fontsize=18)
plt.text(2.4, 2.5, "u+v", color="g", fontsize=18)
plt.grid()
plt.show()
t1 = np.array([2, 0.25])
t2 = np.array([2.5, 3.5])
t3 = np.array([1, 2])
x_coords, y_coords = zip(t1, t2, t3, t1)
plt.plot(x_coords, y_coords, "c--", x_coords, y_coords, "co")
plot_vector2d(v, t1, color="r", linestyle=":")
plot_vector2d(v, t2, color="r", linestyle=":")
plot_vector2d(v, t3, color="r", linestyle=":")
t1b = t1 + v
t2b = t2 + v
t3b = t3 + v
x_coords_b, y_coords_b = zip(t1b, t2b, t3b, t1b)
plt.plot(x_coords_b, y_coords_b, "b-", x_coords_b, y_coords_b, "bo")
plt.text(4, 4.2, "v", color="r", fontsize=18)
plt.text(3, 2.3, "v", color="r", fontsize=18)
plt.text(3.5, 0.4, "v", color="r", fontsize=18)
plt.axis([0, 6, 0, 5])
plt.grid()
plt.show()
print("1.5 *", u, "=")
1.5 * u
k = 2.5
t1c = k * t1
t2c = k * t2
t3c = k * t3
plt.plot(x_coords, y_coords, "c--", x_coords, y_coords, "co")
plot_vector2d(t1, color="r")
plot_vector2d(t2, color="r")
plot_vector2d(t3, color="r")
x_coords_c, y_coords_c = zip(t1c, t2c, t3c, t1c)
plt.plot(x_coords_c, y_coords_c, "b-", x_coords_c, y_coords_c, "bo")
plot_vector2d(k * t1, color="b", linestyle=":")
plot_vector2d(k * t2, color="b", linestyle=":")
plot_vector2d(k * t3, color="b", linestyle=":")
plt.axis([0, 9, 0, 9])
plt.grid()
plt.show()
plt.gca().add_artist(plt.Circle((0,0),1,color='c'))
plt.plot(0, 0, "ko")
plot_vector2d(v / LA.norm(v), color="k")
plot_vector2d(v, color="b", linestyle=":")
plt.text(0.3, 0.3, "$\hat{u}$", color="k", fontsize=18)
plt.text(1.5, 0.7, "$u$", color="b", fontsize=18)
plt.axis([-1.5, 5.5, -1.5, 3.5])
plt.grid()
plt.show()
def dot_product(v1, v2):
return sum(v1i * v2i for v1i, v2i in zip(v1, v2))
dot_product(u, v)
np.dot(u,v)
u.dot(v)
print(" ",u)
print("* ",v, "(NOT a dot product)")
print("-"*10)
u * v
def vector_angle(u, v):
cos_theta = u.dot(v) / LA.norm(u) / LA.norm(v)
return np.arccos(np.clip(cos_theta, -1, 1))
theta = vector_angle(u, v)
print("Angle =", theta, "radians")
print(" =", theta * 180 / np.pi, "degrees")
u_normalized = u / LA.norm(u)
proj = v.dot(u_normalized) * u_normalized
plot_vector2d(u, color="r")
plot_vector2d(v, color="b")
plot_vector2d(proj, color="k", linestyle=":")
plt.plot(proj[0], proj[1], "ko")
plt.plot([proj[0], v[0]], [proj[1], v[1]], "b:")
plt.text(1, 2, "$proj_u v$", color="k", fontsize=18)
plt.text(1.8, 0.2, "$v$", color="b", fontsize=18)
plt.text(0.8, 3, "$u$", color="r", fontsize=18)
plt.axis([0, 8, 0, 5.5])
plt.grid()
plt.show()
[
[10, 20, 30],
[40, 50, 60]
]
A = np.array([
[10,20,30],
[40,50,60]
])
A
A.shape
A.size
A[1,2] # 2nd row, 3rd column
A[1, :] # 2nd row vector (as a 1D array)
A[:, 2] # 3rd column vector (as a 1D array)
A[1:2, :] # rows 2 to 3 (excluded): this returns row 2 as a one-row matrix
A[:, 2:3] # columns 3 to 4 (excluded): this returns column 3 as a one-column matrix
np.diag([4, 5, 6])
D = np.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
])
np.diag(D)
np.eye(3)
B = np.array([[1,2,3], [4, 5, 6]])
B
A
A + B
B + A
C = np.array([[100,200,300], [400, 500, 600]])
A + (B + C)
(A + B) + C
2 * A
A * 2
2 * (3 * A)
(2 * 3) * A
2 * (A + B)
2 * A + 2 * B
D = np.array([
[ 2, 3, 5, 7],
[11, 13, 17, 19],
[23, 29, 31, 37]
])
E = A.dot(D)
E
40*5 + 50*17 + 60*31
E[1,2] # row 2, column 3
try:
D.dot(A)
except ValueError as e:
print("ValueError:", e)
F = np.array([
[5,2],
[4,1],
[9,3]
])
A.dot(F)
F.dot(A)
G = np.array([
[8, 7, 4, 2, 5],
[2, 5, 1, 0, 5],
[9, 11, 17, 21, 0],
[0, 1, 0, 1, 2]])
A.dot(D).dot(G) # (AB)G
A.dot(D.dot(G)) # A(BG)
(A + B).dot(D)
A.dot(D) + B.dot(D)
A.dot(np.eye(3))
np.eye(2).dot(A)
A * B # NOT a matrix multiplication
import sys
print("Python version: {}.{}.{}".format(*sys.version_info))
print("Numpy version:", np.version.version)
# Uncomment the following line if your Python version is ≥3.5
# and your NumPy version is ≥1.10:
#A @ D
A
A.T
A.T.T
(A + B).T
A.T + B.T
(A.dot(D)).T
D.T.dot(A.T)
D.dot(D.T)
u
u.T
u_row = np.array([u])
u_row
u[np.newaxis, :]
u[np.newaxis]
u[None]
u_row.T
u[:, np.newaxis]
P = np.array([
[3.0, 4.0, 1.0, 4.6],
[0.2, 3.5, 2.0, 0.5]
])
x_coords_P, y_coords_P = P
plt.scatter(x_coords_P, y_coords_P)
plt.axis([0, 5, 0, 4])
plt.show()
plt.plot(x_coords_P, y_coords_P, "bo")
plt.plot(x_coords_P, y_coords_P, "b--")
plt.axis([0, 5, 0, 4])
plt.grid()
plt.show()
from matplotlib.patches import Polygon
plt.gca().add_artist(Polygon(P.T))
plt.axis([0, 5, 0, 4])
plt.grid()
plt.show()
H = np.array([
[ 0.5, -0.2, 0.2, -0.1],
[ 0.4, 0.4, 1.5, 0.6]
])
P_moved = P + H
plt.gca().add_artist(Polygon(P.T, alpha=0.2))
plt.gca().add_artist(Polygon(P_moved.T, alpha=0.3, color="r"))
for vector, origin in zip(H.T, P.T):
plot_vector2d(vector, origin=origin)
plt.text(2.2, 1.8, "$P$", color="b", fontsize=18)
plt.text(2.0, 3.2, "$P+H$", color="r", fontsize=18)
plt.text(2.5, 0.5, "$H_{*,1}$", color="k", fontsize=18)
plt.text(4.1, 3.5, "$H_{*,2}$", color="k", fontsize=18)
plt.text(0.4, 2.6, "$H_{*,3}$", color="k", fontsize=18)
plt.text(4.4, 0.2, "$H_{*,4}$", color="k", fontsize=18)
plt.axis([0, 5, 0, 4])
plt.grid()
plt.show()
H2 = np.array([
[-0.5, -0.5, -0.5, -0.5],
[ 0.4, 0.4, 0.4, 0.4]
])
P_translated = P + H2
plt.gca().add_artist(Polygon(P.T, alpha=0.2))
plt.gca().add_artist(Polygon(P_translated.T, alpha=0.3, color="r"))
for vector, origin in zip(H2.T, P.T):
plot_vector2d(vector, origin=origin)
plt.axis([0, 5, 0, 4])
plt.grid()
plt.show()
P + [[-0.5], [0.4]] # same as P + H2, thanks to NumPy broadcasting
def plot_transformation(P_before, P_after, text_before, text_after, axis = [0, 5, 0, 4], arrows=False):
if arrows:
for vector_before, vector_after in zip(P_before.T, P_after.T):
plot_vector2d(vector_before, color="blue", linestyle="--")
plot_vector2d(vector_after, color="red", linestyle="-")
plt.gca().add_artist(Polygon(P_before.T, alpha=0.2))
plt.gca().add_artist(Polygon(P_after.T, alpha=0.3, color="r"))
plt.text(P_before[0].mean(), P_before[1].mean(), text_before, fontsize=18, color="blue")
plt.text(P_after[0].mean(), P_after[1].mean(), text_after, fontsize=18, color="red")
plt.axis(axis)
plt.grid()
P_rescaled = 0.60 * P
plot_transformation(P, P_rescaled, "$P$", "$0.6 P$", arrows=True)
plt.show()
U = np.array([[1, 0]])
U.dot(P)
def plot_projection(U, P):
U_P = U.dot(P)
axis_end = 100 * U
plot_vector2d(axis_end[0], color="black")
plt.gca().add_artist(Polygon(P.T, alpha=0.2))
for vector, proj_coordinate in zip(P.T, U_P.T):
proj_point = proj_coordinate * U
plt.plot(proj_point[0][0], proj_point[0][1], "ro")
plt.plot([vector[0], proj_point[0][0]], [vector[1], proj_point[0][1]], "r--")
plt.axis([0, 5, 0, 4])
plt.grid()
plt.show()
plot_projection(U, P)
angle30 = 30 * np.pi / 180 # angle in radians
U_30 = np.array([[np.cos(angle30), np.sin(angle30)]])
plot_projection(U_30, P)
angle120 = 120 * np.pi / 180
V = np.array([
[np.cos(angle30), np.sin(angle30)],
[np.cos(angle120), np.sin(angle120)]
])
V
V.dot(P)
P_rotated = V.dot(P)
plot_transformation(P, P_rotated, "$P$", "$VP$", [-2, 6, -2, 4], arrows=True)
plt.show()
F_shear = np.array([
[1, 1.5],
[0, 1]
])
plot_transformation(P, F_shear.dot(P), "$P$", "$F_{shear} P$",
axis=[0, 10, 0, 7])
plt.show()
Square = np.array([
[0, 0, 1, 1],
[0, 1, 1, 0]
])
plot_transformation(Square, F_shear.dot(Square), "$Square$", "$F_{shear} Square$",
axis=[0, 2.6, 0, 1.8])
plt.show()
F_squeeze = np.array([
[1.4, 0],
[0, 1/1.4]
])
plot_transformation(P, F_squeeze.dot(P), "$P$", "$F_{squeeze} P$",
axis=[0, 7, 0, 5])
plt.show()
plot_transformation(Square, F_squeeze.dot(Square), "$Square$", "$F_{squeeze} Square$",
axis=[0, 1.8, 0, 1.2])
plt.show()
F_reflect = np.array([
[1, 0],
[0, -1]
])
plot_transformation(P, F_reflect.dot(P), "$P$", "$F_{reflect} P$",
axis=[-2, 9, -4.5, 4.5])
plt.show()
F_inv_shear = np.array([
[1, -1.5],
[0, 1]
])
P_sheared = F_shear.dot(P)
P_unsheared = F_inv_shear.dot(P_sheared)
plot_transformation(P_sheared, P_unsheared, "$P_{sheared}$", "$P_{unsheared}$",
axis=[0, 10, 0, 7])
plt.plot(P[0], P[1], "b--")
plt.show()
F_inv_shear = LA.inv(F_shear)
F_inv_shear
plt.plot([0, 0, 1, 1, 0, 0.1, 0.1, 0, 0.1, 1.1, 1.0, 1.1, 1.1, 1.0, 1.1, 0.1],
[0, 1, 1, 0, 0, 0.1, 1.1, 1.0, 1.1, 1.1, 1.0, 1.1, 0.1, 0, 0.1, 0.1],
"r-")
plt.axis([-0.5, 2.1, -0.5, 1.5])
plt.show()
F_project = np.array([
[1, 0],
[0, 0]
])
plot_transformation(P, F_project.dot(P), "$P$", "$F_{project} \cdot P$",
axis=[0, 6, -1, 4])
plt.show()
try:
LA.inv(F_project)
except LA.LinAlgError as e:
print("LinAlgError:", e)
angle30 = 30 * np.pi / 180
F_project_30 = np.array([
[np.cos(angle30)**2, np.sin(2*angle30)/2],
[np.sin(2*angle30)/2, np.sin(angle30)**2]
])
plot_transformation(P, F_project_30.dot(P), "$P$", "$F_{project\_30} \cdot P$",
axis=[0, 6, -1, 4])
plt.show()
LA.inv(F_project_30)
F_shear.dot(LA.inv(F_shear))
LA.inv(LA.inv(F_shear))
F_involution = np.array([
[0, -2],
[-1/2, 0]
])
plot_transformation(P, F_involution.dot(P), "$P$", "$F_{involution} \cdot P$",
axis=[-8, 5, -4, 4])
plt.show()
F_reflect.dot(F_reflect.T)
M = np.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 0]
])
LA.det(M)
LA.det(F_project)
LA.det(F_project_30)
LA.det(F_shear)
F_scale = np.array([
[0.5, 0],
[0, 0.5]
])
plot_transformation(P, F_scale.dot(P), "$P$", "$F_{scale} \cdot P$",
axis=[0, 6, -1, 4])
plt.show()
LA.det(F_scale)
LA.det(F_reflect)
P_squeezed_then_sheared = F_shear.dot(F_squeeze.dot(P))
P_squeezed_then_sheared = (F_shear.dot(F_squeeze)).dot(P)
F_squeeze_then_shear = F_shear.dot(F_squeeze)
P_squeezed_then_sheared = F_squeeze_then_shear.dot(P)
LA.inv(F_shear.dot(F_squeeze)) == LA.inv(F_squeeze).dot(LA.inv(F_shear))
U, S_diag, V_T = LA.svd(F_shear) # note: in python 3 you can rename S_diag to Σ_diag
U
S_diag
S = np.diag(S_diag)
S
U.dot(np.diag(S_diag)).dot(V_T)
F_shear
plot_transformation(Square, V_T.dot(Square), "$Square$", "$V^T \cdot Square$",
axis=[-0.5, 3.5 , -1.5, 1.5])
plt.show()
plot_transformation(V_T.dot(Square), S.dot(V_T).dot(Square), "$V^T \cdot Square$", "$\Sigma \cdot V^T \cdot Square$",
axis=[-0.5, 3.5 , -1.5, 1.5])
plt.show()
plot_transformation(S.dot(V_T).dot(Square), U.dot(S).dot(V_T).dot(Square),"$\Sigma \cdot V^T \cdot Square$", "$U \cdot \Sigma \cdot V^T \cdot Square$",
axis=[-0.5, 3.5 , -1.5, 1.5])
plt.show()
eigenvalues, eigenvectors = LA.eig(F_squeeze)
eigenvalues # [λ0, λ1, …]
eigenvectors # [v0, v1, …]
eigenvalues2, eigenvectors2 = LA.eig(F_shear)
eigenvalues2 # [λ0, λ1, …]
eigenvectors2 # [v0, v1, …]
D = np.array([
[100, 200, 300],
[ 10, 20, 30],
[ 1, 2, 3],
])
np.trace(D)
np.trace(F_project)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Vectors
Step2: Since we plan to do quite a lot of scientific calculations, it is much better to use NumPy's ndarray, which provides a lot of convenient and optimized implementations of essential mathematical operations on vectors (for more details about NumPy, check out the NumPy tutorial). For example
Step3: The size of a vector can be obtained using the size attribute
Step4: The $i^{th}$ element (also called entry or item) of a vector $\textbf{v}$ is noted $\textbf{v}_i$.
Step5: Plotting vectors
Step6: 2D vectors
Step7: These vectors each have 2 elements, so they can easily be represented graphically on a 2D graph, for example as points
Step8: Vectors can also be represented as arrows. Let's create a small convenience function to draw nice arrows
Step9: Now let's draw the vectors u and v as arrows
Step10: 3D vectors
Step12: Now let's plot them using matplotlib's Axes3D
Step14: It is a bit hard to visualize exactly where in space these two points are, so let's add vertical lines. We'll create a small convenience function to plot a list of 3d vectors with vertical lines attached
Step15: Norm
Step16: However, it is much more efficient to use NumPy's norm function, available in the linalg (Linear Algebra) module
Step17: Let's plot a little diagram to confirm that the length of vector $\textbf{v}$ is indeed $\approx5.4$
Step18: Looks about right!
Step19: Let's look at what vector addition looks like graphically
Step20: Vector addition is commutative, meaning that $\textbf{u} + \textbf{v} = \textbf{v} + \textbf{u}$. You can see it on the previous image
Step21: Finally, substracting a vector is like adding the opposite vector.
Step22: Graphically, scalar multiplication results in changing the scale of a figure, hence the name scalar. The distance from the origin (the point at coordinates equal to zero) is also multiplied by the scalar. For example, let's scale up by a factor of k = 2.5
Step23: As you might guess, dividing a vector by a scalar is equivalent to multiplying by its inverse
Step24: Dot product
Step25: But a much more efficient implementation is provided by NumPy with the dot function
Step26: Equivalently, you can use the dot method of ndarrays
Step27: Caution
Step28: Main properties
Step29: Note
Step30: Matrices
Step31: A much more efficient way is to use the NumPy library which provides optimized implementations of many matrix operations
Step32: By convention matrices generally have uppercase names, such as $A$.
Step33: Caution
Step34: Element indexing
Step35: The $i^{th}$ row vector is sometimes noted $M_i$ or $M_{i,}$, but again there is no standard notation so people often prefer to explicitely define their own names, for example
Step36: Similarly, the $j^{th}$ column vector is sometimes noted $M^j$ or $M_{,j}$, but there is no standard notation. We will use $M_{,j}$. For example, to access $A_{*,3}$ (ie. $A$'s 3rd column vector)
Step37: Note that the result is actually a one-dimensional NumPy array
Step38: Square, triangular, diagonal and identity matrices
Step39: If you pass a matrix to the diag function, it will happily extract the diagonal values
Step40: Finally, the identity matrix of size $n$, noted $I_n$, is a diagonal matrix of size $n \times n$ with $1$'s in the main diagonal, for example $I_3$
Step41: The identity matrix is often noted simply $I$ (instead of $I_n$) when its size is clear given the context. It is called the identity matrix because multiplying a matrix with it leaves the matrix unchanged as we will see below.
Step42: Addition is commutative, meaning that $A + B = B + A$
Step43: It is also associative, meaning that $A + (B + C) = (A + B) + C$
Step44: Scalar multiplication
Step45: Scalar multiplication is also defined on the right hand side, and gives the same result
Step46: This makes scalar multiplication commutative.
Step47: Finally, it is distributive over addition of matrices, meaning that $\lambda (Q + R) = \lambda Q + \lambda R$
Step48: Matrix multiplication
Step49: Let's check this result by looking at one element, just to be sure
Step50: Looks good! You can check the other elements until you get used to the algorithm.
Step51: This illustrates the fact that matrix multiplication is NOT commutative
Step52: On the other hand, matrix multiplication is associative, meaning that $Q(RS) = (QR)S$. Let's create a $4 \times 5$ matrix $G$ to illustrate this
Step53: It is also distributive over addition of matrices, meaning that $(Q + R)S = QS + RS$. For example
Step54: The product of a matrix $M$ by the identity matrix (of matching size) results in the same matrix $M$. More formally, if $M$ is an $m \times n$ matrix, then
Step55: Caution
Step56: The @ infix operator
Step57: Note
Step58: As you might expect, transposing a matrix twice returns the original matrix
Step59: Transposition is distributive over addition of matrices, meaning that $(Q + R)^T = Q^T + R^T$. For example
Step60: Moreover, $(Q \cdot R)^T = R^T \cdot Q^T$. Note that the order is reversed. For example
Step61: A symmetric matrix $M$ is defined as a matrix that is equal to its transpose
Step62: Converting 1D arrays to 2D arrays in NumPy
Step63: We want to convert $\textbf{u}$ into a row vector before transposing it. There are a few ways to do this
Step64: Notice the extra square brackets
Step65: This quite explicit
Step66: This is equivalent, but a little less explicit.
Step67: This is the shortest version, but you probably want to avoid it because it is unclear. The reason it works is that np.newaxis is actually equal to None, so this is equivalent to the previous version.
Step68: Great! We now have a nice column vector.
Step69: Plotting a matrix
Step70: Of course we could also have stored the same 4 vectors as row vectors instead of column vectors, resulting in a $4 \times 2$ matrix (the transpose of $P$, in fact). It is really an arbitrary choice.
Step71: Or you can represent it as a polygon
Step72: Geometric applications of matrix operations
Step73: If we add a matrix full of identical vectors, we get a simple geometric translation
Step74: Although matrices can only be added together if they have the same size, NumPy allows adding a row vector or a column vector to a matrix
Step75: Scalar multiplication
Step76: Matrix multiplication – Projection onto an axis
Step77: Now let's look at the dot product $P \cdot U$
Step78: These are the horizontal coordinates of the vectors in $P$. In other words, we just projected $P$ onto the horizontal axis
Step79: We can actually project on any other axis by just replacing $U$ with any other unit vector. For example, let's project on the axis that is at a 30° angle above the horizontal axis
Step80: Good! Remember that the dot product of a unit vector and a matrix basically performs a projection on an axis and gives us the coordinates of the resulting points on that axis.
Step81: Let's look at the product $VP$
Step82: The first row is equal to $V_{1,} P$, which is the coordinates of the projection of $P$ onto the 30° axis, as we have seen above. The second row is $V_{2,} P$, which is the coordinates of the projection of $P$ onto the 120° axis. So basically we obtained the coordinates of $P$ after rotating the horizontal and vertical axes by 30° (or equivalently after rotating the polygon by -30° around the origin)! Let's plot $VP$ to see this
Step83: Matrix $V$ is called a rotation matrix.
Step84: Let's look at how this transformation affects the unit square
Step85: Now let's look at a squeeze mapping
Step86: The effect on the unit square is
Step87: Let's show a last one
Step88: Matrix inverse
Step89: We applied a shear mapping on $P$, just like we did before, but then we applied a second transformation to the result, and lo and behold this had the effect of coming back to the original $P$ (we plotted the original $P$'s outline to double check). The second transformation is the inverse of the first one.
Step90: Only square matrices can be inversed. This makes sense when you think about it
Step91: Looking at this image, it is impossible to tell whether this is the projection of a cube or the projection of a narrow rectangular object. Some information has been lost in the projection.
Step92: This transformation matrix performs a projection onto the horizontal axis. Our polygon gets entirely flattened out so some information is entirely lost and it is impossible to go back to the original polygon using a linear transformation. In other words, $F_{project}$ has no inverse. Such a square matrix that cannot be inversed is called a singular matrix (aka degenerate matrix). If we ask NumPy to calculate its inverse, it raises an exception
Step93: Here is another example of a singular matrix. This one performs a projection onto the axis at a 30° angle above the horizontal axis
Step94: But this time, due to floating point rounding errors, NumPy manages to calculate an inverse (notice how large the elements are, though)
Step95: As you might expect, the dot product of a matrix by its inverse results in the identity matrix
Step96: Another way to express this is that the inverse of the inverse of a matrix $M$ is $M$ itself
Step97: Also, the inverse of scaling by a factor of $\lambda$ is of course scaling by a factor or $\frac{1}{\lambda}$
Step98: Finally, a square matrix $H$ whose inverse is its own transpose is an orthogonal matrix
Step99: Determinant
Step100: One of the main uses of the determinant is to determine whether a square matrix can be inversed or not
Step101: That's right, $F_{project}$ is singular, as we saw earlier.
Step102: This determinant is suspiciously close to 0
Step103: Perfect! This matrix can be inversed as we saw earlier. Wow, math really works!
Step104: We rescaled the polygon by a factor of 1/2 on both vertical and horizontal axes so the surface area of the resulting polygon is 1/4$^{th}$ of the original polygon. Let's compute the determinant and check that
Step105: Correct!
Step106: Composing linear transformations
Step107: Since the dot product is associative, the following code is equivalent
Step108: Note that the order of the transformations is the reverse of the dot product order.
Step109: From now on we can perform both transformations in just one dot product, which can lead to a very significant performance boost.
Step110: Singular Value Decomposition
Step111: Note that this is just a 1D array containing the diagonal values of Σ. To get the actual matrix Σ, we can use NumPy's diag function
Step112: Now let's check that $U \cdot \Sigma \cdot V^T$ is indeed equal to F_shear
Step113: It worked like a charm. Let's apply these transformations one by one (in reverse order) on the unit square to understand what's going on. First, let's apply the first rotation $V^T$
Step114: Now let's rescale along the vertical and horizontal axes using $\Sigma$
Step115: Finally, we apply the second rotation $U$
Step116: And we can see that the result is indeed a shear mapping of the original unit square.
Step117: Indeed the horizontal vectors are stretched by a factor of 1.4, and the vertical vectors are shrunk by a factor of 1/1.4=0.714…, so far so good. Let's look at the shear mapping matrix $F_{shear}$
Step118: Wait, what!? We expected just one unit eigenvector, not two. The second vector is almost equal to $\begin{pmatrix}-1 \ 0 \end{pmatrix}$, which is on the same line as the first vector $\begin{pmatrix}1 \ 0 \end{pmatrix}$. This is due to floating point errors. We can safely ignore vectors that are (almost) colinear (ie. on the same line).
Step119: The trace does not have a simple geometric interpretation (in general), but it has a number of properties that make it useful in many areas
|
7,652
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.random.seed(42)  # fix NumPy's RNG seed so weight init / shuffling are reproducible
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
# NOTE(review): keras.layers.normalization is the legacy multi-backend Keras path
from keras.layers.normalization import BatchNormalization
from keras.callbacks import TensorBoard # for part 3.5 on TensorBoard
import tflearn.datasets.oxflower17 as oxflower17
# Oxford 17-category flower dataset: 224x224 RGB images, one-hot labels.
X, Y = oxflower17.load_data(one_hot=True)
# AlexNet-style CNN (Krizhevsky et al. 2012) adapted to 17 output classes.
model = Sequential()
# Conv block 1: large 11x11 receptive field with stride 4, as in AlexNet.
model.add(Conv2D(96, kernel_size=(11, 11), strides=(4, 4), activation='relu', input_shape=(224, 224, 3)))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(BatchNormalization())
# Conv block 2.
model.add(Conv2D(256, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(BatchNormalization())
# Conv block 3: three stacked 3x3 convolutions before the final pool.
model.add(Conv2D(256, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(384, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(384, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(BatchNormalization())
# Dense classifier head with dropout regularisation.
model.add(Flatten())
model.add(Dense(4096, activation='tanh'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='tanh'))
model.add(Dropout(0.5))
model.add(Dense(17, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Log training curves for TensorBoard (lesson 3, part 5).
tensorbrd = TensorBoard('logs/alexnet')
# Hold out 10% of the data for validation; shuffle each epoch.
model.fit(X, Y, batch_size=64, epochs=32, verbose=1, validation_split=0.1, shuffle=True,
          callbacks=[tensorbrd])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load dependencies
Step2: Load and preprocess data
Step3: Design neural network architecture
Step4: Configure model
Step5: Configure TensorBoard (for part 5 of lesson 3)
Step6: Train!
|
7,653
|
<ASSISTANT_TASK:>
Python Code:
# Author: Eric Larson <larson.eric.d@gmail.com>
#         Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD-3-Clause
import mne
Brain = mne.viz.get_brain_class()
# Use the 'fsaverage' template subject shipped with the MNE sample data.
subjects_dir = mne.datasets.sample.data_path() / 'subjects'
# Download the HCP-MMP1 and sub-divided aparc parcellations for fsaverage.
mne.datasets.fetch_hcp_mmp_parcellation(subjects_dir=subjects_dir,
                                        verbose=True)
mne.datasets.fetch_aparc_sub_parcellation(subjects_dir=subjects_dir,
                                          verbose=True)
# Read all left-hemisphere labels of the HCP-MMP1 annotation.
labels = mne.read_labels_from_annot(
    'fsaverage', 'HCPMMP1', 'lh', subjects_dir=subjects_dir)
# Plot the full parcellation and highlight the left primary auditory ROI (A1).
brain = Brain('fsaverage', 'lh', 'inflated', subjects_dir=subjects_dir,
              cortex='low_contrast', background='white', size=(800, 600))
brain.add_annotation('HCPMMP1')
aud_label = [label for label in labels if label.name == 'L_A1_ROI-lh'][0]
brain.add_label(aud_label, borders=False)
# Plot the combined version of the parcellation (fewer, merged labels).
brain = Brain('fsaverage', 'lh', 'inflated', subjects_dir=subjects_dir,
              cortex='low_contrast', background='white', size=(800, 600))
brain.add_annotation('HCPMMP1_combined')
# Plot the sub-divided 'aparc_sub' parcellation.
brain = Brain('fsaverage', 'lh', 'inflated', subjects_dir=subjects_dir,
              cortex='low_contrast', background='white', size=(800, 600))
brain.add_annotation('aparc_sub')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can also plot a combined set of labels (23 per hemisphere).
Step2: We can add another custom parcellation
|
7,654
|
<ASSISTANT_TASK:>
Python Code:
import pymatgen as pmg
# Basic element properties come straight from pymatgen's periodic-table data.
si = pmg.Element('Si')
print('Si:')
print(si.atomic_mass)
print(si.common_oxidation_states)
print(si.number)
# CsCl prototype: simple cubic lattice with Cl at the body centre (fractional coords).
lattice = pmg.Lattice.cubic(4.2)
cscl = pmg.Structure(lattice, ["Cs", "Cl"], [[0,0,0],[0.5,0.5,0.5]])
print('\nCsCl structure')
print(cscl)
print('\n')
print(cscl.composition)
cscl.to(filename='POSCAR')
# ...
# The examples are online at http://pymatgen.org/#quick-start
from pymatgen import Structure, Lattice
# Po: simple cubic, built from explicit basis vectors.
# convert 335 pm to A:
a = 335*10**-2 # lattice parameter, a (in Angstrom)
latt = Lattice([[a, 0, 0], [0, a, 0], [0, 0, a]])
Po = Structure(latt, ['Po'], [(0, 0, 0)])
print(Po)
print(Po.volume)
print(Po.density)
# Fe: body-centred cubic -> two atoms in the conventional cell.
a = 2.866 # lattice parameter, a
latt = Lattice([[a, 0, 0], [0, a, 0], [0, 0, a]])
Fe = Structure(latt, ['Fe', 'Fe'], [(0, 0, 0), (0.5, 0.5, 0.5)])
print(Fe)
print(Fe.volume)
print(Fe.density)
# Al: face-centred cubic; lattice parameter recovered from the given cell volume.
a = 65.89**(1.0/3.0) # lattice parameter, a
latt = Lattice([[a, 0, 0], [0, a, 0], [0, 0, a]])
Al = Structure(latt, ['Al', 'Al', 'Al', 'Al'], [(0.0,0.0,0.0), (0.5,0.5,0.0),
                                                (0.5,0.0,0.5), (0.0,0.5,0.5)])
print(Al)
print(Al.volume)
print(Al.density)
# NaCl: rock-salt structure, 4 Na + 4 Cl per conventional cubic cell.
a = 5.6402 # lattice parameter, a
latt = Lattice.cubic(a)
NaCl = Structure(latt, ['Na', 'Na', 'Na', 'Na', 'Cl', 'Cl', 'Cl', 'Cl'],
                 [(0.0,0.0,0.0), (0.5,0.5,0.0), (0.5,0.0,0.5), (0.0,0.5,0.5),
                  (0.5,0.0,0.0), (0.0,0.5,0.0), (0.0,0.0,0.5), (0.5,0.5,0.5)])
print(NaCl)
print(NaCl.volume)
print(NaCl.density)
NaCl.to(filename='POSCAR')
# found lattice parameters for P6_3/mmc at https://materialsproject.org/materials/mp-46/
# this is the hexagonal close-packed (HCP) setting of Ti
a = 2.939
c = 4.641
latt = Lattice.hexagonal(a, c)
Ti = Structure(latt, ['Ti', 'Ti'],
               [(0.0,0.33333,0.0), (0.33333,0.0, 0.5)])
print(Ti)
print(Ti.volume)
print(Ti.density)
Ti.to(filename='POSCAR')
# NaCl again, but generated from its space group and the asymmetric unit only.
a = 5.6402 # lattice parameter, a
latt = Lattice.cubic(a)
NaCl_spg = Structure.from_spacegroup('Fm-3m', latt, ['Na', 'Cl'], [(0,0,0), (0.5,0.5,0.5)])
print(NaCl_spg)
print(NaCl_spg.volume)
print(NaCl_spg.density)
# Forsterite (Mg2SiO4), orthorhombic space group no. 62, from Wyckoff positions.
a, b, c = 4.803, 6.048, 10.323 # lattice parameters a, b, c
latt = Lattice.orthorhombic(a, b, c)
forsterite = Structure.from_spacegroup(62, latt, ['Mg', 'Mg', 'Si', 'O', 'O', 'O'],
                                       [(0, 0, 0),
                                        (0.222736, 0.750000, 0.491625),
                                        (0.093638, 0.250000, 0.425974),
                                        (0.162835, 0.033014, 0.276946),
                                        (0.053218, 0.750000, 0.722149),
                                        (0.091677, 0.250000, 0.765961),])
print(forsterite)
print(forsterite.volume)
print(forsterite.density)
forsterite.to(filename='POSCAR')
# Pull structures/energies directly from the Materials Project REST API
# (requires a configured API key for MPRester).
from pymatgen import MPRester
with MPRester() as mpr:
    Al_api = mpr.get_structure_by_material_id('mp-134')
print(Al_api)
print()
print(Al_api.lattice.matrix)
print(Al_api.volume)
print(Al_api.density)
with MPRester() as mpr:
    result = mpr.get_entry_by_material_id('mp-134', property_data=['energy_per_atom'])
print(result.energy)
print(result.energy_per_atom)
# since we just saw that the structure only had one atom, this makes sense.
with MPRester() as mpr:
    result = mpr.get_data('C', prop='energy')
print(result)
def mp_dict_to_energies(d):
    """Return the list of energies from a list of Materials Project records.

    Parameters
    ----------
    d (list): List of dictionaries of the form
        ``{'material_id': mp-id, 'energy': energy}``.

    Returns
    -------
    list:
        The ``'energy'`` value of each input dictionary, in input order.
    """
    energies = []
    for entry in d:
        energies.append(entry['energy'])
    return energies
print(mp_dict_to_energies(result))
# Same extraction as mp_dict_to_energies, but as a list comprehension.
energies = [entry['energy'] for entry in result]
print(energies)
# Fetch a structure from the Materials Project and mutate it in place.
with MPRester() as mpr:
    struct = mpr.get_structure_by_material_id('mp-510624')
print(struct)
print(struct.formula)
print(struct.volume)
print(struct.density)
# Rescale the lattice to a target cell volume (in cubic Angstrom).
struct.scale_lattice(56.965)
print(struct.volume)
# Substitute every Fe site with Co.
struct.replace_species({'Fe':'Co'})
print(struct)
struct.make_supercell([2,2,2]) # or just struct.make_supercell(2) works for this
print(struct)
print(struct.volume)
# Scale the volume up by 10%.
struct.scale_lattice(struct.volume*1.1)
print(struct.volume)
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
sga = SpacegroupAnalyzer(struct)
print(sga.get_space_group_symbol())
# Replace the site at index 24 with Fe; this lowers the symmetry.
struct.replace(24, 'Fe')
print(struct)
sga = SpacegroupAnalyzer(struct)
print(sga.get_space_group_symbol())
# Random 0.1-Angstrom displacement of all sites breaks the symmetry further.
struct.perturb(0.1)
sga = SpacegroupAnalyzer(struct)
print(sga.get_space_group_symbol())
# Strings are immutable: str.replace returns a NEW string; the original is
# unchanged until the name is rebound to the result.
my_string = 'Python is a scientific programming language'
print('Original string:')
print(my_string)
print('Try to mutate the string:')
my_string.replace('scientific', 'fun')
print(my_string)
print('It wasn\'t replaced!')
my_string = my_string.replace('scientific', 'fun')
print(my_string)
print('Now it has been replaced.')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Create the following Structures from Lattices. Instead of using the convenience class methods e.g. Lattice.cubic, create them from basis vectors. Print the volumes and densities of the structures after you create them to check yourself. The API documentation for these classes may be helpful (see resources). Pay attention to the units! You are encouraged, but not required, to use Structure.to('POSCAR') (or CIF) and then load the file into something like VESTA (free) or CrystalMaker (commercial).
Step2: ii. Fe. Body centered cubic with lattice parameter of 2.866 Angstrom
Step3: iii. Al. Face centered cubic with volume of 65.89 cubic Angstrom
Step4: iv. NaCl. Rock salt structure. Use the Lattice.cubic staticmethod and a lattice parameter of 5.6402 Angstrom
Step5: v. Ti. Hexagonal. For this, you can use the hexagonal staticmethod. Try to look up the correct lattice parameter by browsing the Materials Project. It's highly suggested to plot this and convince yourself that the answer is correct. For best results, turn on space filling and increase the boundary to see the close packed structure.
Step6: HCP Ti
Step7: ii. Pick a relatively complex (3 or more component) metal or oxide structure from the Materials Project and create it from the spacegroup. Optionally, visualize it in VESTA to verify that you constructed it correctly.
Step8: 5. We've now streamlined Structure generation, but we can do better still. Using the Materials Project API, we can directly access the structures that we want and modify them from there. Read through the couple examples at the pymatgen REST API and find the actual API documentation and skim it. Then do the following using MPRester
Step9: ii. Get the energy of mp-134.
Step10: iii. Get all of the energies for C (carbon) structures as a dictionary
Step12: iv. Write a function that takes that dictionary and makes a list of energies (in any order)
Step13: v. Do the same as above, except use a list comprehension instead of a function
Step14: vi. What are the drawbacks of storing the energies in this way? Are there drawbacks for storing the data as two lists of ids and energies?
Step15: ii. Look at the band structure and band gap for that structure on the Materials Project website. Is this a metal, semiconductor or an insulator?
Step16: iv. Replace all of the Fe species with Co in one step
Step17: v. Make a 2x2x2 supercell
Step18: vi. Scale the volume up by 10%
Step19: vii. Import and create a SpacegroupAnalyzer from pymatgen.symmetry.analyzer with your structure. Print the spacegroup symbol
Step20: viii. Replace one of Co with an Fe. What is the chemical formula? What is the spacegroup symbol of this structure?
Step21: ix. Break the symmetry with a perturbation of 0.1 Angstrom. What is the spacegroup symbol of this structure?
Step22: x. Explain what the difference is between mutable and immutable objects. What was the implication for these problems?
|
7,655
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
logging.getLogger("tensorflow").setLevel(logging.DEBUG)
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pathlib
# Confirm the experimental 16x8 quantization mode (int16 activations,
# int8 weights) exists in this TF build; raises AttributeError otherwise.
tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define the model architecture: small conv net, logits output (no softmax).
model = keras.Sequential([
    keras.layers.InputLayer(input_shape=(28, 28)),
    keras.layers.Reshape(target_shape=(28, 28, 1)),
    keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu),
    keras.layers.MaxPooling2D(pool_size=(2, 2)),
    keras.layers.Flatten(),
    keras.layers.Dense(10)
])
# Train the digit classification model (one epoch is enough for the demo).
model.compile(optimizer='adam',
              loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.fit(
    train_images,
    train_labels,
    epochs=1,
    validation_data=(test_images, test_labels)
)
# Baseline: convert the trained Keras model to a float TFLite model.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir/"mnist_model.tflite"
tflite_model_file.write_bytes(tflite_model)
# Reconfigure the same converter for 16x8 post-training quantization.
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8]
# Calibration data: the MNIST training images as single-image batches.
mnist_train, _ = tf.keras.datasets.mnist.load_data()
images = tf.cast(mnist_train[0], tf.float32) / 255.0
mnist_ds = tf.data.Dataset.from_tensor_slices((images)).batch(1)
def representative_data_gen():
    """Yield calibration samples for post-training quantization.

    Draws 100 single-image batches from the module-level ``mnist_ds``
    dataset; the model has exactly one input, so each sample is wrapped
    in a one-element list.
    """
    calibration_batches = mnist_ds.take(100)
    for sample in calibration_batches:
        yield [sample]
converter.representative_dataset = representative_data_gen
# Convert with 16x8 quantization and write the quantized model to disk.
tflite_16x8_model = converter.convert()
tflite_model_16x8_file = tflite_models_dir/"mnist_model_quant_16x8.tflite"
tflite_model_16x8_file.write_bytes(tflite_16x8_model)
# IPython shell escape: compare file sizes of the float vs quantized models.
!ls -lh {tflite_models_dir}
# Load both models into TFLite interpreters.
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
interpreter_16x8 = tf.lite.Interpreter(model_path=str(tflite_model_16x8_file))
interpreter_16x8.allocate_tensors()
# Sanity check: classify a single test image with the float model.
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
interpreter.set_tensor(input_index, test_image)
interpreter.invoke()
predictions = interpreter.get_tensor(output_index)
import matplotlib.pylab as plt
plt.imshow(test_images[0])
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(test_labels[0]),
                              predict=str(np.argmax(predictions[0]))))
plt.grid(False)
# Repeat the single-image check with the 16x8 quantized model.
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter_16x8.get_input_details()[0]["index"]
output_index = interpreter_16x8.get_output_details()[0]["index"]
interpreter_16x8.set_tensor(input_index, test_image)
interpreter_16x8.invoke()
predictions = interpreter_16x8.get_tensor(output_index)
plt.imshow(test_images[0])
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(test_labels[0]),
                              predict=str(np.argmax(predictions[0]))))
plt.grid(False)
# Helper: run a TFLite interpreter over the whole "test" split and return
# top-1 accuracy against the module-level ``test_labels``.
def evaluate_model(interpreter):
    """Return classification accuracy of *interpreter* on ``test_images``."""
    in_idx = interpreter.get_input_details()[0]["index"]
    out_idx = interpreter.get_output_details()[0]["index"]

    predictions = []
    for image in test_images:
        # Add the batch dimension and match the model's float32 input type.
        batch = np.expand_dims(image, axis=0).astype(np.float32)
        interpreter.set_tensor(in_idx, batch)
        interpreter.invoke()
        # Drop the batch dimension and take the most probable digit.
        logits = interpreter.tensor(out_idx)()[0]
        predictions.append(np.argmax(logits))

    correct = sum(1 for pred, truth in zip(predictions, test_labels)
                  if pred == truth)
    return correct * 1.0 / len(predictions)
# Accuracy of the float baseline model.
print(evaluate_model(interpreter))
# NOTE: This quantization mode is an experimental post-training mode,
# it does not have any optimized kernels implementations or
# specialized machine learning hardware accelerators. Therefore,
# it could be slower than the float interpreter.
print(evaluate_model(interpreter_16x8))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: int16 アクティベーションによるトレーニング後の整数量子化
Step2: 16x8 量子化モードが使用可能であることを確認します
Step3: モデルをトレーニングしてエクスポートする
Step4: この例では、モデルを 1 エポックでトレーニングしたので、トレーニングの精度は 96% 以下になります。
Step5: .tfliteファイルに書き込みます。
Step6: モデルを 16x8 量子化モードに量子化するには、最初にoptimizationsフラグを設定してデフォルトの最適化を使用します。次に、16x8 量子化モードがターゲット仕様でサポートされる必要な演算であることを指定します。
Step7: int8 トレーニング後の量子化の場合と同様に、コンバーターオプションinference_input(output)_typeを tf.int16 に設定することで、完全整数量子化モデルを生成できます。
Step8: 最後に、通常どおりにモデルを変換します。デフォルトでは、変換されたモデルは呼び出しの便宜上、浮動小数点の入力と出力を引き続き使用します。
Step9: 生成されるファイルのサイズが約1/3であることに注目してください。
Step10: TensorFlow Lite モデルを実行する
Step11: 1 つの画像でモデルをテストする
Step12: モデルを評価する
Step13: 16x8 量子化モデルで評価を繰り返します。
|
7,656
|
<ASSISTANT_TASK:>
Python Code:
# Python 2 tutorial cell: string creation, indexing and common methods.
str1 = '"Hola" is how we say "hello" in Spanish.'
str2 = "Strings can also be defined with quotes; try to be sistematic."
print str1
print type(str1)
print type(3)
print type(3.)
# Slicing: characters 0..4 (end index is exclusive).
print str1[0:5]
# '+' concatenates strings.
print str1+str2
print str1.lower()
print str1.upper()
print len(str1)
# replace() returns a NEW string; str1 itself is unchanged (immutability).
print str1.replace('h','H')
str3 = 'This is a question'
str3 = str3.replace('i','o')
str3 = str3.lower()
print str3[0:3]
# Escape sequences: \r carriage return, \n line feed.
print 'This is just a carriage return symbol.\r This sentence will overwrite the previous text.'
print 'If you wish to start a new line,\r\nthe line feed character should also be used.'
print 'But note that most applications are tolerant\nto the use of \'line feed\' only.'
# Lists may mix element types.
list1 = ['student', 'teacher', 1997, 2000]
print list1
list2 = [1, 2, 3, 4, 5 ]
print list2
list3 = ["a", "b", "c", "d"]
print list3
# Indexing and slicing lists.
print list1[0]
print list2[2:4]
print list3[-1] # negative indices must be avoided, just be aware that Python may not produce an error
# In-place list mutation: append/remove; '+' builds a new list.
list1 = ['student', 'teacher', 1997, 2000]
list1.append(3)
print list1
list1.remove('teacher')
print list1
print list1 + list2
print list1 + ['end of list']
list2 = [1, 2, 3, 4, 5 ]
print len(list2)
print max(list2)
print min(list2)
# if/elif/else chain on user input (raw_input is Python 2).
x = int(raw_input("Please enter an integer: "))
if x < 0:
    x = 0
    print 'Negative changed to zero'
elif x == 0:
    print 'Zero'
elif x == 1:
    print 'One'
else:
    print 'More than one'
# Iterating a list directly, then with an index via enumerate().
words = ['cat', 'window', 'open-course']
for w in words:
    print w, len(w)
words = ['cat', 'window', 'open-course']
for (i, w) in enumerate(words):
    print 'element ' + str(i) + ' is ' + w
# Rebinding an int: x is unaffected when y is rebound.
x = 42
y = x
y = 50
print x
print y
# Rebinding a list name leaves the original list intact...
colors1 = ["red", "green"]
colors2 = colors1
colors2 = ["rouge", "vert"]
print colors1
# ...but mutating the shared list object IS visible through both names.
colors1 = ["red", "green"]
colors2 = colors1
colors2[1] = "blue"
print colors1
# A slice copy ([:]) gives an independent top-level list.
list1 = ['a','b','c','d']
list2 = list1[:]
list2[1] = 'x'
print list2
print list1
['a', 'b', 'c', 'd']
# The copy is SHALLOW: nested lists are still shared between copies.
list3 = ['a','b',['ab','ba']]
list4 = list3[:]
list4[0] = 'c'
list4[2][1] = 'd'
print(list3)
def my_sqrt(number):
    """Computes the square root of a number."""
    return number ** (0.5)  # In python ** is exponentiation (^ in other languages)
x = my_sqrt(2)
print x
help(my_sqrt)  # help() displays the function's docstring
def nth_root(base, exp=2):
    """Computes the nth root of a number.

    ``exp`` defaults to 2, so ``nth_root(x)`` is the square root of ``x``.
    """
    return base ** (1.0/exp)  # In python ** is exponentiation (^ in other languages)
print nth_root(10000)    # square root (default exp=2) -> 100.0
print nth_root(10000,4)  # fourth root -> 10.0
def add_square_to_list(x, my_list, dummy_list):
    # Rebinding the (immutable) int parameter is purely local:
    # the caller's variable is unaffected ("call by object reference").
    x = x ** 2
    # Mutating the list object in place IS visible to the caller.
    my_list.append(x)
    # Rebinding the parameter name to a brand-new list is local again:
    # the caller's list is untouched.
    dummy_list = ["I", "am", "not", "a" , "dummy", "list"]
x = 5
my_list =[4, 9, 16]
dummy_list = ["I", "am", "a" , "dummy", "list"]
add_square_to_list(x, my_list, dummy_list)
print x           # unchanged: 5
print my_list     # mutated in place: [4, 9, 16, 25]
print dummy_list  # unchanged: the rebinding inside the function was local
# Write a small file, then read it back in two ways.
f = open('workfile', 'wb')
f.write('This is a test\n with 2 lines')
f.close()
f2 = open('workfile', 'r')
text=f2.read()
f2.close()
print text
# Iterating a file object yields it line by line.
f2 = open('workfile', 'r')
for line in f2:
    print line
f2.close()
import time
import sys
print time.time() # wall-clock time in seconds since the epoch (not CPU time)
Nwait = 2
print "Waiting %d seconds..." % Nwait
sys.stdout.flush() # Try to execute commenting this line...
time.sleep(2) # suspends execution for the given number of seconds
print time.time() # wall-clock time again, ~2 s later
print "Done!"
# Import styles: aliased module vs importing names directly.
import time as t
print t.time()
from time import time, sleep
print time()
def is_prime(x):
    """Return True if the integer ``x`` is a prime number.

    Uses trial division up to sqrt(x); numbers below 2 are not prime.
    """
    if x < 2:
        isprime = False
    else:
        isprime = True
        divisor = 2
        while divisor * divisor <= x:
            if x % divisor == 0:
                isprime = False
                break
            divisor += 1
    return isprime


def primes(N):
    """Return the list of all prime numbers strictly below ``N``."""
    list_of_primes = [n for n in range(2, N) if is_prime(n)]
    return list_of_primes
# Exercise driver: list the primes below 1000 and report how many there are.
list_primes = primes(1000)
print "These are the first %d prime numbers:\n" % len(list_primes)
print list_primes
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: It is easy to check the type of a variable with the type() command
Step2: The following commands implement some common operations with strings in Python. Have a look at them, and try to deduce what the result of each operation will be. Then, execute the commands and check what are the actual results.
Step3: It is interesting to notice the difference in the use of commands 'lower' and 'len'. Python is an object-oriented language, and str1 is an instance of the Python class 'string'. Then, str1.lower() invokes the method lower() of the class string to which object str1 belongs, while len(str1) or type(str1) imply the use of external methods, not belonging to the class string. In any case, we will not pay (much) attention to these issues during the session.
Step4: 2. Working with Python lists
Step5: To check the value of a list element, indicate between brackets the index (or indices) to obtain the value (or values) at that position (positions).
Step6: To add elements in a list you can use the method append() and to remove them the method remove(). Lists can also be directly added, with the same effect as appending.
Step7: Other useful functions are
Step8: 3. Flow control (with 'for' and 'if')
Step9: Indentation
Step10: In combination with enumerate(), you can iterate over the elementes of the sequence and have an index over them
Step11: 4. Variables and assignments
Step12: The first two lines do not seem problematic. But when y is set to 50, what will happen to the value of x? C programmers will assume that x will be changed to 50 as well, because we said before that y "points" to the location of x. But this is not a C pointer. Because x and y will no longer share the same value, y gets its own memory location, containing 50, and x sticks to 42.
Step13: Ok. This is what we expected, colors1 is keeping its own values.
Step14: Ouch! That wasn't expected.
Step16: Conclusion
Step17: As we said, you must define a function using def, then the name of the function and in brackets ( ) the list of arguments of the function. The function will not return anything unless you specify it with a return statement.
Step19: Another interesting feature of python is that you can give default values to arguments in a function. For example, in the following code, when the second argument is not used during the call its value is 2.
Step20: One tricky feature in python is how it evaluates the arguments that you pass to a function call. The most common evaluation strategies when passing arguments to a function have been call-by-value and call-by-reference. Python uses a mixture of these two, which is known as "Call-by-Object", sometimes also called "Call by Object Reference" or "Call by Sharing". Let's see it with an example
Step21: If you pass immutable arguments like integers, strings or tuples to a function, the passing acts like call-by-value. The object reference is passed to the function parameters. They can't be changed within the function, because they can't be changed at all, i.e. they are immutable.
Step22: The first argument is a string containing the filename. The second argument defines the mode in which the file will be used
Step23: To read the content of a file, use the function f.read()
Step24: You can also read line by line from the file identifier
Step25: 7. Modules import
Step26: 2) Define a short name to use the library
Step27: 3) Import only some elements of the library
Step28: 8. Exercise
|
7,657
|
<ASSISTANT_TASK:>
Python Code:
from nilearn import plotting
%matplotlib inline
from os.path import join as opj
import json
from nipype.interfaces.spm import Level1Design, EstimateModel, EstimateContrast
from nipype.algorithms.modelgen import SpecifySPMModel
from nipype.interfaces.utility import Function, IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink
from nipype import Workflow, Node
experiment_dir = '/output'
output_dir = 'datasink'
working_dir = 'workingdir'
# list of subject identifiers
subject_list = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']
# TR of functional images (read from the BIDS task sidecar JSON)
with open('/data/ds000114/task-fingerfootlips_bold.json', 'rt') as fp:
    task_info = json.load(fp)
TR = task_info['RepetitionTime']
# Smoothing widths (FWHM, in mm) used during preprocessing
fwhm = [4, 8]
# SpecifyModel - Generates SPM-specific Model
modelspec = Node(SpecifySPMModel(concatenate_runs=False,
                                 input_units='secs',
                                 output_units='secs',
                                 time_repetition=TR,
                                 high_pass_filter_cutoff=128),
                 name="modelspec")
# Level1Design - Generates an SPM design matrix (canonical HRF + temporal derivative)
level1design = Node(Level1Design(bases={'hrf': {'derivs': [1, 0]}},
                                 timing_units='secs',
                                 interscan_interval=TR,
                                 model_serial_correlations='FAST'),
                    name="level1design")
# EstimateModel - estimate the parameters of the model (classical estimation)
level1estimate = Node(EstimateModel(estimation_method={'Classical': 1}),
                      name="level1estimate")
# EstimateContrast - estimates contrasts
level1conest = Node(EstimateContrast(), name="level1conest")
# Condition names
condition_names = ['Finger', 'Foot', 'Lips']
# T-contrasts: grand average, one per condition, and condition-vs-others.
cont01 = ['average', 'T', condition_names, [1/3., 1/3., 1/3.]]
cont02 = ['Finger', 'T', condition_names, [1, 0, 0]]
cont03 = ['Foot', 'T', condition_names, [0, 1, 0]]
cont04 = ['Lips', 'T', condition_names, [0, 0, 1]]
cont05 = ['Finger > others','T', condition_names, [1, -0.5, -0.5]]
cont06 = ['Foot > others', 'T', condition_names, [-0.5, 1, -0.5]]
cont07 = ['Lips > others', 'T', condition_names, [-0.5, -0.5, 1]]
# F-contrasts over the simple and differential T-contrasts.
cont08 = ['activation', 'F', [cont02, cont03, cont04]]
cont09 = ['differences', 'F', [cont05, cont06, cont07]]
contrast_list = [cont01, cont02, cont03, cont04, cont05, cont06, cont07, cont08, cont09]
# IPython shell escape: show the raw BIDS events file for this task.
!cat /data/ds000114/task-fingerfootlips_events.tsv
import pandas as pd
trialinfo = pd.read_table('/data/ds000114/task-fingerfootlips_events.tsv')
trialinfo
# Inspect onsets/durations grouped per condition ('trial_type').
for group in trialinfo.groupby('trial_type'):
    print(group)
    print("")
def subjectinfo(subject_id):
    """Build the condition/onset/duration Bunch for one subject.

    Reads the task's BIDS events file and groups events by condition
    ('trial_type'). The same events file is used for every subject, so
    ``subject_id`` only selects who this info is attached to downstream.
    """
    import pandas as pd
    from nipype.interfaces.base import Bunch

    events = pd.read_table('/data/ds000114/task-fingerfootlips_events.tsv')

    conditions, onsets, durations = [], [], []
    for name, rows in events.groupby('trial_type'):
        conditions.append(name)
        # Shift onsets by -10 s because 4 dummy scans were removed earlier.
        onsets.append(list(rows.onset - 10))
        durations.append(rows.duration.tolist())

    subject_info = [Bunch(conditions=conditions,
                          onsets=onsets,
                          durations=durations)]
    return subject_info  # this output will later be returned to infosource
# Get Subject Info - get subject specific condition information
getsubjectinfo = Node(Function(input_names=['subject_id'],
                               output_names=['subject_info'],
                               function=subjectinfo),
                      name='getsubjectinfo')
# Infosource - a function free node to iterate over the list of subject names
# and smoothing widths; the contrast list is passed through unchanged.
infosource = Node(IdentityInterface(fields=['subject_id',
                                            'fwhm_id',
                                            'contrasts'],
                                    contrasts=contrast_list),
                  name="infosource")
infosource.iterables = [('subject_id', subject_list),
                        ('fwhm_id', fwhm)]
# SelectFiles - to grab the data (alternative to DataGrabber); templates are
# filled in with the iterated subject_id/fwhm_id plus the fixed task_id.
templates = {'func': opj(output_dir, 'preproc', 'sub-{subject_id}', 'task-{task_id}',
                         'fwhm-{fwhm_id}_ssub-{subject_id}_ses-test_task-{task_id}_bold.nii'),
             'mc_param': opj(output_dir, 'preproc', 'sub-{subject_id}', 'task-{task_id}',
                             'sub-{subject_id}_ses-test_task-{task_id}_bold.par'),
             'outliers': opj(output_dir, 'preproc', 'sub-{subject_id}', 'task-{task_id}',
                             'art.sub-{subject_id}_ses-test_task-{task_id}_bold_outliers.txt')}
selectfiles = Node(SelectFiles(templates,
                               base_directory=experiment_dir,
                               sort_filelist=True),
                   name="selectfiles")
selectfiles.inputs.task_id = 'fingerfootlips'
# Datasink - creates output folder for important outputs
datasink = Node(DataSink(base_directory=experiment_dir,
                         container=output_dir),
                name="datasink")
# Use the following DataSink output substitutions to get readable folder names
substitutions = [('_subject_id_', 'sub-')]
subjFolders = [('_fwhm_id_%ssub-%s' % (f, sub), 'sub-%s/fwhm-%s' % (sub, f))
               for f in fwhm
               for sub in subject_list]
substitutions.extend(subjFolders)
datasink.inputs.substitutions = substitutions
# Initiation of the 1st-level analysis workflow
l1analysis = Workflow(name='l1analysis')
l1analysis.base_dir = opj(experiment_dir, working_dir)
# Connect up the 1st-level analysis components:
# infosource -> selectfiles/getsubjectinfo -> modelspec -> design ->
# estimate -> contrasts -> datasink.
l1analysis.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),
                                               ('fwhm_id', 'fwhm_id')]),
                    (infosource, getsubjectinfo, [('subject_id',
                                                   'subject_id')]),
                    (getsubjectinfo, modelspec, [('subject_info',
                                                  'subject_info')]),
                    (infosource, level1conest, [('contrasts', 'contrasts')]),
                    (selectfiles, modelspec, [('func', 'functional_runs')]),
                    (selectfiles, modelspec, [('mc_param', 'realignment_parameters'),
                                              ('outliers', 'outlier_files')]),
                    (modelspec, level1design, [('session_info',
                                                'session_info')]),
                    (level1design, level1estimate, [('spm_mat_file',
                                                     'spm_mat_file')]),
                    (level1estimate, level1conest, [('spm_mat_file',
                                                     'spm_mat_file'),
                                                    ('beta_images',
                                                     'beta_images'),
                                                    ('residual_image',
                                                     'residual_image')]),
                    (level1conest, datasink, [('spm_mat_file', '1stLevel.@spm_mat'),
                                              ('spmT_images', '1stLevel.@T'),
                                              ('con_images', '1stLevel.@con'),
                                              ('spmF_images', '1stLevel.@F'),
                                              ('ess_images', '1stLevel.@ess'),
                                              ]),
                    ])
# Create 1st-level analysis output graph
l1analysis.write_graph(graph2use='colored', format='png', simple_form=True)
# Visualize the graph
from IPython.display import Image
Image(filename=opj(l1analysis.base_dir, 'l1analysis', 'graph.png'))
# Execute the workflow with 4 parallel processes.
l1analysis.run('MultiProc', plugin_args={'n_procs': 4})
# IPython shell escape: list the generated 1st-level outputs.
!tree /output/datasink/1stLevel
from nilearn.plotting import plot_stat_map
anatimg = '/data/ds000114/derivatives/fmriprep/sub-02/anat/sub-02_t1w_preproc.nii.gz'
plot_stat_map(
'/output/datasink/1stLevel/sub-02/fwhm-4/spmT_0001.nii', title='average - fwhm=4',
bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-02/fwhm-8/spmT_0001.nii', title='average - fwhm=8',
bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-02/fwhm-4/spmT_0002.nii', title='finger - fwhm=4',
bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-02/fwhm-4/spmT_0003.nii', title='foot - fwhm=4',
bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-02/fwhm-4/spmT_0004.nii', title='lips - fwhm=4',
bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-02/fwhm-4/spmT_0005.nii', title='finger - fwhm=4',
bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-02/fwhm-4/spmT_0006.nii', title='foot - fwhm=4',
bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-02/fwhm-4/spmT_0007.nii', title='lips - fwhm=4',
bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-01/fwhm-4/spmT_0002.nii', title='finger - fwhm=4 - sub-01',
bg_img='/data/ds000114/derivatives/fmriprep/sub-01/anat/sub-01_t1w_preproc.nii.gz',
threshold=3, display_mode='y', cut_coords=(5, 10, 15, 20), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-02/fwhm-4/spmT_0002.nii', title='finger - fwhm=4 - sub-02',
bg_img='/data/ds000114/derivatives/fmriprep/sub-02/anat/sub-02_t1w_preproc.nii.gz',
threshold=3, display_mode='y', cut_coords=(5, 10, 15, 20), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-03/fwhm-4/spmT_0002.nii', title='finger - fwhm=4 - sub-03',
bg_img='/data/ds000114/derivatives/fmriprep/sub-03/anat/sub-03_t1w_preproc.nii.gz',
threshold=3, display_mode='y', cut_coords=(5, 10, 15, 20), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-04/fwhm-4/spmT_0002.nii', title='finger - fwhm=4 - sub-04',
bg_img='/data/ds000114/derivatives/fmriprep/sub-04/anat/sub-04_t1w_preproc.nii.gz',
threshold=3, display_mode='y', cut_coords=(5, 10, 15, 20), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-05/fwhm-4/spmT_0002.nii', title='finger - fwhm=4 - sub-05',
bg_img='/data/ds000114/derivatives/fmriprep/sub-05/anat/sub-05_t1w_preproc.nii.gz',
threshold=3, display_mode='y', cut_coords=(5, 10, 15, 20), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-06/fwhm-4/spmT_0002.nii', title='finger - fwhm=4 - sub-06',
bg_img='/data/ds000114/derivatives/fmriprep/sub-06/anat/sub-06_t1w_preproc.nii.gz',
threshold=3, display_mode='y', cut_coords=(5, 10, 15, 20), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-07/fwhm-4/spmT_0002.nii', title='finger - fwhm=4 - sub-07',
bg_img='/data/ds000114/derivatives/fmriprep/sub-07/anat/sub-07_t1w_preproc.nii.gz',
threshold=3, display_mode='y', cut_coords=(5, 10, 15, 20), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-08/fwhm-4/spmT_0002.nii', title='finger - fwhm=4 - sub-08',
bg_img='/data/ds000114/derivatives/fmriprep/sub-08/anat/sub-08_t1w_preproc.nii.gz',
threshold=3, display_mode='y', cut_coords=(5, 10, 15, 20), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-09/fwhm-4/spmT_0002.nii', title='finger - fwhm=4 - sub-09',
bg_img='/data/ds000114/derivatives/fmriprep/sub-09/anat/sub-09_t1w_preproc.nii.gz',
threshold=3, display_mode='y', cut_coords=(5, 10, 15, 20), dim=-1);
plot_stat_map(
'/output/datasink/1stLevel/sub-10/fwhm-4/spmT_0002.nii', title='finger - fwhm=4 - sub-10',
bg_img='/data/ds000114/derivatives/fmriprep/sub-10/anat/sub-10_t1w_preproc.nii.gz',
threshold=3, display_mode='y', cut_coords=(5, 10, 15, 20), dim=-1);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Experiment parameters
Step2: Specify Nodes
Step3: Specify GLM contrasts
Step4: Specify GLM Model
Step5: We can also create a data frame using pandas library.
Step6: And finally we need to separate the onsets of the three conditions, i.e. group by trial_type. This can be done as follows
Step7: Now, let us incorporate all this in the helper function subjectinfo.
Step8: Specify input & output stream
Step9: Specify Workflow
Step10: Visualize the workflow
Step11: Run the Workflow
Step12: Inspect output
Step13: Visualize results
Step14: Now, let's look at the three contrasts Finger, Foot, Lips.
Step15: We can also check three additional contrasts Finger > others, Foot > others and Lips > others.
Step16: Special case
|
7,658
|
<ASSISTANT_TASK:>
Python Code:
writefile("funwave-input.tpl",
!INPUT FILE FOR FUNWAVE_TVD
! NOTE: all input parameter are capital sensitive
! --------------------TITLE-------------------------------------
! title only for log file
TITLE = VESSEL
! -------------------HOT START---------------------------------
HOT_START = F
FileNumber_HOTSTART = 1
! -------------------PARALLEL INFO-----------------------------
!
! PX,PY - processor numbers in X and Y
! NOTE: make sure consistency with mpirun -np n (px*py)
!
PX = 2
PY = 1
! --------------------DEPTH-------------------------------------
! Depth types, DEPTH_TYPE=DATA: from depth file
! DEPTH_TYPE=FLAT: idealized flat, need depth_flat
! DEPTH_TYPE=SLOPE: idealized slope,
! need slope,SLP starting point, Xslp
! and depth_flat
DEPTH_TYPE = FLAT
DEPTH_FLAT = 10.0
! -------------------PRINT---------------------------------
! PRINT*,
! result folder
RESULT_FOLDER = output/
! ------------------DIMENSION-----------------------------
! global grid dimension
Mglob = 500
Nglob = 100
! ----------------- TIME----------------------------------
! time: total computational time/ plot time / screen interval
! all in seconds
TOTAL_TIME = 4.0
PLOT_INTV = 1.0
PLOT_INTV_STATION = 50000.0
SCREEN_INTV = 1.0
HOTSTART_INTV = 360000000000.0
WAVEMAKER = INI_GAU
AMP = %AMP%
Xc = 250.0
Yc = 50.0
WID = %WID%
! -----------------GRID----------------------------------
! if use spherical grid, in decimal degrees
! cartesian grid sizes
DX = 1.0
DY = 1.0
! ----------------SHIP WAKES ----------------------------
VESSEL_FOLDER = ./
NumVessel = 2
! -----------------OUTPUT-----------------------------
ETA = T
U = T
V = T
)
writefile("funwave-wrapper.txt",
#!/bin/bash
# This is a generic wrapper script for checking the runtime environment
# of a job and verifying the runtime template variable values
set +x
date
# print out the agave runtiem variables
echo "##################################################"
echo "# Agave Job Runtime Variables "
echo "##################################################"
echo "\n"
echo 'AGAVE_JOB_NAME="\${AGAVE_JOB_NAME}"'
echo 'AGAVE_JOB_ID="\${AGAVE_JOB_ID}"'
echo 'AGAVE_JOB_APP_ID="\${AGAVE_JOB_APP_ID}"'
echo 'AGAVE_JOB_EXECUTION_SYSTEM="\${AGAVE_JOB_EXECUTION_SYSTEM}"'
echo 'AGAVE_JOB_BATCH_QUEUE="\${AGAVE_JOB_BATCH_QUEUE}"'
echo 'AGAVE_JOB_SUBMIT_TIME="\${AGAVE_JOB_SUBMIT_TIME}"'
echo 'AGAVE_JOB_ARCHIVE_SYSTEM="\${AGAVE_JOB_ARCHIVE_SYSTEM}"'
echo 'AGAVE_JOB_ARCHIVE_PATH="\${AGAVE_JOB_ARCHIVE_PATH}"'
echo 'AGAVE_JOB_NODE_COUNT="\${AGAVE_JOB_NODE_COUNT}"'
echo 'AGAVE_JOB_PROCESSORS_PER_NODE="\${AGAVE_JOB_PROCESSORS_PER_NODE}"'
echo 'AGAVE_JOB_MEMORY_PER_NODE="\${AGAVE_JOB_MEMORY_PER_NODE}"'
echo 'AGAVE_JOB_ARCHIVE_URL="\${AGAVE_JOB_ARCHIVE_URL}"'
echo 'AGAVE_JOB_OWNER="\${AGAVE_JOB_OWNER}"'
echo 'AGAVE_JOB_TENANT="\${AGAVE_JOB_TENANT}"'
echo 'AGAVE_JOB_ARCHIVE="\${AGAVE_JOB_ARCHIVE}"'
echo 'AGAVE_JOB_MAX_RUNTIME="\${AGAVE_JOB_MAX_RUNTIME}"'
echo 'AGAVE_JOB_MAX_RUNTIME_SECONDS="\${AGAVE_JOB_MAX_RUNTIME_SECONDS}"'
echo 'AGAVE_JOB_MAX_RUNTIME_MILLISECONDS="\${AGAVE_JOB_MAX_RUNTIME_MILLISECONDS}"'
echo 'AGAVE_BASE_URL="\${AGAVE_BASE_URL}"'
echo 'AGAVE_JOB_ARCHIVE="\${AGAVE_JOB_ARCHIVE}"'
echo 'AGAVE_CACHE_DIR="\${AGAVE_CACHE_DIR}"'
echo 'AGAVE_JOB_ACCESS_TOKEN="\${AGAVE_JOB_ACCESS_TOKEN}"'
echo 'AGAVE_JOB_REFRESH_TOKEN="\${AGAVE_JOB_REFRESH_TOKEN}"'
echo 'AGAVE_JOB_PACKAGE_OUTPUT="\${AGAVE_JOB_PACKAGE_OUTPUT}"'
echo 'AGAVE_JOB_COMPRESS_OUTPUT="\${AGAVE_JOB_COMPRESS_OUTPUT}"'
echo "##################################################"
echo "# Job Runtime Environment "
echo "##################################################"
echo "\n"
LD_LIBRARY_PATH=/usr/local/lib
# print environment
env
# copy to file for usage later on
env > ./environment.out
# resolve our job request parameters in funwave's input file
cp -f funwave-input.tpl input.txt
sed -i 's#%WID%#\${WID}#' input.txt
sed -i 's#%AMP%#\${AMP}#' input.txt
# run our funwave command
mpirun -np 2 /home/jovyan/FUNWAVE-TVD/src/funwave_vessel
# if the job request set the parameter compress_ouput to true
# the folder will be compressed
if [[ -z "\${compress_output}" ]]; then
tar czf output.tgz output
fi
)
!files-mkdir -S ${AGAVE_STORAGE_SYSTEM_ID} -N funwave-${AGAVE_APP_DEPLOYMENT_PATH}
!files-upload -F funwave-wrapper.txt -S ${AGAVE_STORAGE_SYSTEM_ID} funwave-${AGAVE_APP_DEPLOYMENT_PATH}/
!files-upload -F funwave-input.tpl -S ${AGAVE_STORAGE_SYSTEM_ID} funwave-${AGAVE_APP_DEPLOYMENT_PATH}/
writefile("funwave-test.txt",
compress_output=1
AMP=2.0
WID=24.0
funwave-wrapper.txt
)
!files-mkdir -S ${AGAVE_STORAGE_SYSTEM_ID} -N funwave-${AGAVE_APP_DEPLOYMENT_PATH}
!files-upload -F funwave-test.txt -S ${AGAVE_STORAGE_SYSTEM_ID} funwave-${AGAVE_APP_DEPLOYMENT_PATH}/
writefile("funwave-app.txt",
{
"name":"${AGAVE_USERNAME}-${MACHINE_NAME}-funwave",
"version":"1.0",
"label":"Runs a command",
"shortDescription":"Runs a command",
"longDescription":"",
"deploymentSystem":"${AGAVE_STORAGE_SYSTEM_ID}",
"deploymentPath":"funwave-${AGAVE_APP_DEPLOYMENT_PATH}",
"templatePath":"funwave-wrapper.txt",
"testPath":"funwave-test.txt",
"executionSystem":"${AGAVE_EXECUTION_SYSTEM_ID}",
"executionType":"CLI",
"parallelism":"SERIAL",
"modules":[],
"inputs":[],
"parameters":[{
"id" : "compress_output",
"value" : {
"visible":true,
"required":true,
"type":"bool",
"order":0,
"enquote":false,
"default":true
},
"details":{
"label": "Compress output folder",
"description": "If true, output will be packed and compressed",
"argument": null,
"showArgument": false,
"repeatArgument": false
},
"semantics":{
"argument": null,
"showArgument": false,
"repeatArgument": false
}
},
{
"id" : "AMP",
"value" : {
"visible":true,
"required":true,
"type":"string",
"order":0,
"enquote":false,
"default":"3.0",
"validator": "\\\\d+\\\\.\\\\d+"
},
"details":{
"label": "Wave amplitude",
"description": "Wave amplitude expressed as a decimal value",
"argument": null,
"showArgument": false,
"repeatArgument": false
},
"semantics":{
"argument": null,
"showArgument": false,
"repeatArgument": false
}
},
{
"id" : "WID",
"value" : {
"visible":true,
"required":true,
"type":"string",
"order":0,
"enquote":false,
"default":"25.0",
"validator": "\\\\d+\\\\.\\\\d+"
},
"details":{
"label": "Width",
"description": "Width expressed as a decimal value",
"argument": null,
"showArgument": false,
"repeatArgument": false
},
"semantics":{
"argument": null,
"showArgument": false,
"repeatArgument": false
}
}],
"outputs":[]
}
)
!apps-addupdate -F funwave-app.txt
setvar(
EVENT=*
WEBHOOK_URL=https://hooks.slack.com/services/
WEBHOOK_URL=${REQUESTBIN_URL}?name=\\\${JOB_NAME}&event=\${EVENT}&jobid=\\\${JOB_ID}
)
writefile("funwave-job.txt",
{
"name":"funwave-1",
"appId": "${AGAVE_USERNAME}-${MACHINE_NAME}-funwave-1.0",
"executionSystem": "${AGAVE_EXECUTION_SYSTEM_ID}",
"maxRunTime":"00:10:00",
"archive": false,
"notifications": [
{
"url":"${WEBHOOK_URL}",
"event":"*",
"persistent":"true"
}
],
"parameters": {
"compress_output":true,
"AMP":"4.0",
"WID":"20.0"
}
}
)
setvar(
# Capture the output of the job submit command
OUTPUT=$(jobs-submit -F funwave-job.txt)
# Parse out the job id from the output
JOB_ID=$(echo $OUTPUT | cut -d' ' -f4)
)
!echo ${REQUESTBIN_URL}?inspect
for iter in range(20):
setvar("STAT=$(jobs-status $JOB_ID)")
stat = os.environ["STAT"]
sleep(5.0)
if stat == "FINISHED" or stat == "FAILED":
break
!echo jobs-history ${JOB_ID}
!jobs-history ${JOB_ID}
!jobs-list -l 5
!jobs-output-list --rich --filter=type,length,name ${JOB_ID}
!jobs-output-get ${JOB_ID} funwave-1.out
!cat funwave-1.out
!jobs-output-get ${JOB_ID} funwave-1.ipcexe
!cat funwave-1.ipcexe
!jobs-output-get ${JOB_ID} input.txt
!cat input.txt
!jobs-output-get ${JOB_ID} funwave-1.err
!cat funwave-1.err
setvar("AGAVE_FRIEND=training001")
!systems-roles-list ${AGAVE_EXECUTION_SYSTEM_ID}
!systems-roles-addupdate -u ${AGAVE_FRIEND} -r PUBLISHER ${AGAVE_EXECUTION_SYSTEM_ID}
!systems-roles-list ${AGAVE_EXECUTION_SYSTEM_ID}
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Adding dynamic inputs
Step3: Smarter wrapper templates
Step4: Using Agave commands, we make a directory on the storage server an deploy our wrapper file there.
Step6: All agave applications require a test file. The test file is a free form text file which allows you to specify what resources you might need to test your application.
Step8: More descriptive apps
Step11: Running Jobs
Step13: Because the setvar() command can evalute $() style bash shell substitutions, we will use it to submit our job. This will capture the output of the submit command, and allow us to parse it for the JOB_ID. We'll use the JOB_ID in several subsequent steps.
Step14: <h2>Job Monitoring and Output</h2>
Step15: Of course, you can also monitor the job status by polling. Note that the notifications you receive via email and webhook are less wasteful of resources. However, we show you this for completeness.
Step16: The jobs-history command provides you a record of the steps of what your job did. If your job fails for some reason, this is your best diagnostic.
Step17: This command shows you the job id's and status of the last 5 jobs you ran.
Step18: This next command provides you with a list of all the files generated by your job. You can use it to figure out which files you want to retrieve with jobs-output-get.
Step19: Retrieve the standard output.
Step20: Check our resolved wrapper template to verify the variables were replaced.
Step21: Check the input.txt file resolved from the input template with our runtime values
Step22: Retrieve the standard error output.
Step23: In this next example, we run a job using an application created by the person sitting next to you. Simply edit the value for AGAVE_FRIEND
|
7,659
|
<ASSISTANT_TASK:>
Python Code:
!pip install oauth2
!pip install unidecode
%matplotlib inline
from collections import defaultdict
import json
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib import rcParams
import matplotlib.cm as cm
import matplotlib as mpl
#colorbrewer2 Dark2 qualitative color table
dark2_colors = [(0.10588235294117647, 0.6196078431372549, 0.4666666666666667),
(0.8509803921568627, 0.37254901960784315, 0.00784313725490196),
(0.4588235294117647, 0.4392156862745098, 0.7019607843137254),
(0.9058823529411765, 0.1607843137254902, 0.5411764705882353),
(0.4, 0.6509803921568628, 0.11764705882352941),
(0.9019607843137255, 0.6705882352941176, 0.00784313725490196),
(0.6509803921568628, 0.4627450980392157, 0.11372549019607843)]
rcParams['figure.figsize'] = (10, 6)
rcParams['figure.dpi'] = 150
rcParams['axes.color_cycle'] = dark2_colors
rcParams['lines.linewidth'] = 2
rcParams['axes.facecolor'] = 'white'
rcParams['font.size'] = 14
rcParams['patch.edgecolor'] = 'white'
rcParams['patch.facecolor'] = dark2_colors[0]
rcParams['font.family'] = 'StixGeneral'
def remove_border(axes=None, top=False, right=False, left=True, bottom=True):
Minimize chartjunk by stripping out unnecesasry plot borders and axis ticks
The top/right/left/bottom keywords toggle whether the corresponding plot border is drawn
ax = axes or plt.gca()
ax.spines['top'].set_visible(top)
ax.spines['right'].set_visible(right)
ax.spines['left'].set_visible(left)
ax.spines['bottom'].set_visible(bottom)
#turn off all ticks
ax.yaxis.set_ticks_position('none')
ax.xaxis.set_ticks_position('none')
#now re-enable visibles
if top:
ax.xaxis.tick_top()
if bottom:
ax.xaxis.tick_bottom()
if left:
ax.yaxis.tick_left()
if right:
ax.yaxis.tick_right()
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
#Johanna
#user_token = '6a516d33-786e-443c-b6e9-def654f88098'
#user_secret = 'c03c49da-9dae-4b05-a2af-82e40426439f'
#api_key = 'xpsswsigqw4r'
#secret_key = 'aIRpJHhA8JHTRsyb'
#Alex
#api_key = 'g8lq60ilatfh'
#secret_key = 'XEOmeklHWHtmwgoQ'
#user_token = 'a8991ba6-9a27-40d7-ac6f-9280cc1dc650'
#user_secret = '43a11017-c1f3-4c30-afab-43df3c39b938'
#Nicolas
user_token = 'd41f3e0c-6bb9-4db8-b324-25a723ff2f50'
user_secret = 'fc66e892-6f92-4e15-b9a9-b0cccbec5336'
api_key = 'kg7oy496e09a'
secret_key = 'oLCLRNxVjt8ZY6OE'
import oauth2 as oauth
import urlparse
def request_token(consumer):
client = oauth.Client(consumer)
request_token_url = 'https://api.linkedin.com/uas/oauth/requestToken?scope=r_network'
resp, content = client.request(request_token_url, "POST")
if resp['status'] != '200':
raise Exception("Invalid response %s." % resp['status'])
request_token = dict(urlparse.parse_qsl(content))
return request_token
#consumer = oauth.Consumer(api_key, secret_key)
#r_token = request_token(consumer)
#print "Request Token: oauth_token: %s, oauth_token_secret: %s" % (r_token['oauth_token'], r_token['oauth_token_secret'])
def authorize(request_token):
authorize_url ='https://api.linkedin.com/uas/oauth/authorize'
print "Go to the following link in your browser:"
print "%s?oauth_token=%s" % (authorize_url, request_token['oauth_token'])
print
accepted = 'n'
while accepted.lower() == 'n':
accepted = raw_input('Have you authorized me? (y/n) ')
oauth_verifier = raw_input('What is the PIN? ')
return oauth_verifier
#oauth_verifier = authorize(r_token)
def access(consumer, request_token, oauth_verifier):
access_token_url = 'https://api.linkedin.com/uas/oauth/accessToken'
token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
token.set_verifier(oauth_verifier)
client = oauth.Client(consumer, token)
resp, content = client.request(access_token_url, "POST")
access_token = dict(urlparse.parse_qsl(content))
return access_token
#a_token = access(consumer, r_token, oauth_verifier)
#print a_token
#print "Access Token: oauth_token = %s, oauth_token_secret = %s" % (a_token['oauth_token'], a_token['oauth_token_secret'])
#print "You may now access protected resources using the access tokens above."
consumer = oauth.Consumer(api_key, secret_key)
r_token = request_token(consumer)
print "Request Token: oauth_token: %s, oauth_token_secret: %s" % (r_token['oauth_token'], r_token['oauth_token_secret'])
oauth_verifier = authorize(r_token)
a_token = access(consumer, r_token, oauth_verifier)
print a_token
print "Access Token: oauth_token = %s, oauth_token_secret = %s" % (a_token['oauth_token'], a_token['oauth_token_secret'])
print "You may now access protected resources using the access tokens above."
import simplejson
import codecs
output_file = 'linkedIn_links.csv'
my_name = 'Your Name'
def linkedin_connections():
# Use your credentials to build the oauth client
consumer = oauth.Consumer(key=api_key, secret=secret_key)
token = oauth.Token(key=a_token['oauth_token'], secret=a_token['oauth_token_secret'])
client = oauth.Client(consumer, token)
# Fetch first degree connections
resp, content = client.request('http://api.linkedin.com/v1/people/~/connections?format=json')
results = simplejson.loads(content)
# File that will store the results
output = codecs.open(output_file, 'w', 'utf-8')
# Loop through the 1st degree connection and see how they connect to each other
for result in results["values"]:
con = "%s %s" % (result["firstName"].replace(",", " "), result["lastName"].replace(",", " "))
print >>output, "%s,%s" % (my_name, con)
# This is the trick, use the search API to get related connections
u = "https://api.linkedin.com/v1/people/%s:(relation-to-viewer:(related-connections))?format=json" % result["id"]
resp, content = client.request(u)
rels = simplejson.loads(content)
try:
for rel in rels['relationToViewer']['relatedConnections']['values']:
sec = "%s %s" % (rel["firstName"].replace(",", " "), rel["lastName"].replace(",", " "))
print >>output, "%s,%s" % (con, sec)
except:
pass
linkedin_connections()
from operator import itemgetter
from unidecode import unidecode
clean_output_file = 'linkedIn_links_clean.csv'
def stringify(chain):
# Simple utility to build the nodes labels
allowed = '0123456789abcdefghijklmnopqrstuvwxyz_'
c = unidecode(chain.strip().lower().replace(' ', '_'))
return ''.join([letter for letter in c if letter in allowed])
def clean(f_input, f_output):
output = open(f_output, 'w')
# Store the edges inside a set for dedup
edges = set()
for line in codecs.open(f_input, 'r', 'utf-8'):
from_person, to_person = line.strip().split(',')
_f = stringify(from_person)
_t = stringify(to_person)
# Reorder the edge tuple
_e = tuple(sorted((_f, _t), key=itemgetter(0, 1)))
edges.add(_e)
for edge in edges:
print >>output, '%s,%s' % (edge[0], edge[1])
clean(output_file, clean_output_file)
import csv
from collections import defaultdict
pairlist=[]
connections=defaultdict(list)
userset=set()
with open('linkedIn_links_clean.csv', 'rb') as csvfile:
allrows = csv.reader(csvfile, delimiter=',')
for row in allrows:
# if ((row[0]=='your_name') | (row[1]=='your_name')): continue # exclude yourself ?
pairlist.append((row[0], row[1]))
connections[row[0]].append(row[1])
connections[row[1]].append(row[0])
userset.add(row[0])
userset.add(row[1])
## Actual algorithm starts here
pagerank=defaultdict(lambda:1./len(userset)) # to get normalized values (e.g., to compare with eigenvectors) = probabilities
#pagerank=defaultdict(lambda:1.)
for iteration in xrange(0, 30):
newpagerank=defaultdict(lambda:0.)
for user in userset:
for connection in connections[user]:
newpagerank[user] += pagerank[connection]/len(connections[connection])
pagerank = newpagerank
prs = sorted(pagerank.iteritems(), reverse=True, key=lambda (k,v): v)
print prs[:10]
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib
import math
g = nx.Graph()
remove_me = False
for user in userset:
if remove_me & (user=='your_name'): continue
g.add_node(user)
for user in userset:
if remove_me & (user=='your_name'): continue
nconnec = 0
for connection in connections[user]:
if remove_me & (connection=='your_name'): continue
g.add_edge(user, connection, weight = 1)
nconnec+=1
if remove_me & (nconnec==0):
g.remove_node(user)
pagerank_nx = nx.pagerank_scipy(g)
color = [(min(pagerank_nx[n]*30.,1),min(pagerank_nx[n]*30.,1), min(pagerank_nx[n]*30.,1)) for n in pagerank_nx]
pos = nx.spring_layout(g, iterations=100)
nx.draw_networkx_edges(g, pos, width=1, alpha=0.4)
nx.draw_networkx_nodes(g, pos, node_color=color, node_size=100, alpha=1, linewidths =0.5)
#lbls = nx.draw_networkx_labels(g, pos)
plt.show()
# checks whether we have the same, or similar, pageranks
sorted_pr = sorted(pagerank_nx.iteritems(), reverse=True, key=lambda (k,v): v)
print sorted_pr[:10]
# your number of connection
print 'my degree is: ', g.degree('your_name'), '\n'
# diameter = maximum nb of edges between 2 nodes = always 2 in this case
print 'the graph diameter is: ',nx.diameter(g), '\n'
#center : surprising ?
print 'the center is: ',nx.center(g), '\n'
# number of clique communities of 5 nodes
print 'there are ', len(list(nx.k_clique_communities(g, 5))),'clique communities\n'
# most influential ?
print 'degree: ', g.degree(sorted_pr[2]),'\n'
print 'shortest path between Hanspeter and a friend', nx.shortest_path(g,source='hanspeter_pfister',target='etienne_corteel'),'\n'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lab 9 - Graphs & Networks
Step2: 1. Get API key
Step3: 2. Get Access Token
Step4: 3. Get data, clean it and store to disk
Step5: When you have run these cells you have a 'linkedIn_links_clean.csv' file in the directory of your notebook, that is compatible with gephi. If you don't have a LinkedIn account or think your network is boring you can use one of ours which you can get here.
Step6: and now, with NetworkX !
Step7: A few stats about your network
|
7,660
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cas', 'sandbox-3', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
7,661
|
<ASSISTANT_TASK:>
Python Code:
# Third-party: COBRA combined-regression aggregation and its diagnostics helpers.
from pycobra.cobra import Cobra
from pycobra.diagnostics import Diagnostics
import numpy as np
%matplotlib inline
# setting up our random data-set
rng = np.random.RandomState(1)
# D1 = train machines; D2 = create COBRA; D3 = calibrate epsilon, alpha; D4 = testing
n_features = 20
D1, D2, D3, D4 = 200, 200, 200, 200
D = D1 + D2 + D3 + D4
# Features drawn uniformly from [-1, 1); the response depends only on
# features 1, 3 and 10.
X = rng.uniform(-1, 1, D * n_features).reshape(D, n_features)
Y = np.power(X[:,1], 2) + np.power(X[:,3], 3) + np.exp(X[:,10])
# Y = np.power(X[:,0], 2) + np.power(X[:,1], 3)
# training data-set
X_train = X[:D1 + D2]
X_test = X[D1 + D2 + D3:D1 + D2 + D3 + D4]
X_eps = X[D1 + D2:D1 + D2 + D3]  # held out for epsilon/alpha calibration
# for testing
Y_train = Y[:D1 + D2]
Y_test = Y[D1 + D2 + D3:D1 + D2 + D3 + D4]
Y_eps = Y[D1 + D2:D1 + D2 + D3]
# epsilon=0.5 is a reasonable first guess because the data lies in [-1, 1].
cobra = Cobra(random_state=0, epsilon=0.5)
cobra.fit(X_train, Y_train, default=False)
# Split the training data into D1 (train machines) and D2 (build COBRA).
cobra.split_data(D1, D1 + D2, shuffle_data=True)
cobra.load_default()
# Single test point used below to compare the individual machines.
query = X_test[9].reshape(1, -1)
cobra.machines_
# Prediction of each constituent machine on the same query point.
cobra.machines_['lasso'].predict(query)
cobra.machines_['tree'].predict(query)
cobra.machines_['ridge'].predict(query)
cobra.machines_['random_forest'].predict(query)
cobra.load_machine_predictions()
# Aggregated COBRA prediction vs. the true value for the query point.
cobra.predict(query)
Y_test[9]
# Diagnostics: per-machine MSE, error bound, and hyper-parameter tuning.
cobra_diagnostics = Diagnostics(cobra, X_test, Y_test, load_MSE=True)
cobra_diagnostics.machine_MSE
cobra_diagnostics.error_bound
cobra_diagnostics.optimal_split(X_eps, Y_eps)
# Candidate (D1, D2) split ratios to compare.
split = [(0.05, 0.95), (0.10, 0.90), (0.20, 0.80), (0.40, 0.60), (0.50, 0.50), (0.60, 0.40), (0.80, 0.20), (0.90, 0.10), (0.95, 0.05)]
cobra_diagnostics.optimal_split(X_eps, Y_eps, split=split, info=True, graph=True)
cobra_diagnostics.optimal_epsilon(X_eps, Y_eps, line_points=100)
cobra_diagnostics.optimal_alpha(X_eps, Y_eps, info=True)
cobra_diagnostics.optimal_machines(X_eps, Y_eps, info=True)
# Grid search over (alpha, epsilon) and (machine subset, epsilon) for a
# single calibration point.
cobra_diagnostics.optimal_alpha_grid(X_eps[0], Y_eps[0], line_points=100)
cobra_diagnostics.optimal_machines_grid(X_eps[0], Y_eps[0], line_points=100)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setting up data set
Step2: Setting up COBRA
Step3: When we are fitting, we initialise COBRA with an epsilon value of $0.5$ - this is because we are aware of the distribution and 0.5 is a fair guess of what would be a "good" epsilon value, because the data varies from $-1$ to $1$.
Step4: Let's load the default machines to COBRA.
Step5: We note here that further machines can be loaded using either the loadMachine() and loadSKMachine() methods. The only prerequisite is that the machine has a valid predict() method.
Step6: Aggregate!
Step7: Optimizing COBRA
Step8: This error is bound by the value $C\mathscr{l}^{\frac{-2}{M + 2}}$ upto a constant $C$, which is problem dependant. For more details, we refer the user to the original paper.
Step9: Playing with Data-Splitting
Step10: What we saw was the default result, with the optimal split ratio and the corresponding MSE. We can do a further analysis here by enabling the info and graph options, and using more values to split on.
Step11: Alpha, Epsilon and Machines
Step12: Alpha
Step13: In this particular case, the best performance is obtained by seeking consensus over all 4 machines.
|
7,662
|
<ASSISTANT_TASK:>
Python Code:
pulses = otx.getall()
len(pulses)
json_normalize(pulses)[0:5]
json_normalize(pulses[1]["indicators"])
indicator_types = [
{
"name": "IPv4",
"description": "An IPv4 address indicating the online location of a server or other computer."
},
{
"name": "IPv6",
"description": "An IPv6 address indicating the online location of a server or other computer."
},
{
"name": "domain",
"description": "A domain name for a website or server. Domains encompass a series of hostnames."
},
{
"name": "hostname",
"description": "The hostname for a server located within a domain."
},
{
"name": "email",
"description": "An email associated with suspicious activity."
},
{
"name": "URL",
"description": " Uniform Resource Location (URL) summarizing the online location of a file or resource."
},
{
"name": "URI",
"description": "Uniform Resource Indicator (URI) describing the explicit path to a file hosted online."
},
{
"name": "FileHash-MD5",
"description": "A MD5-format hash that summarizes the architecture and content of a file."
},
{
"name": "FileHash-SHA1",
"description": "A SHA-format hash that summarizes the architecture and content of a file."
},
{
"name": "FileHash-SHA256",
"description": "A SHA-256-format hash that summarizes the architecture and content of a file."
},
{
"name": "FileHash-PEHASH",
"description": "A PEPHASH-format hash that summarizes the architecture and content of a file."
},
{
"name": "FileHash-IMPHASH",
"description": "An IMPHASH-format hash that summarizes the architecture and content of a file."
},
{
"name": "CIDR",
"description": "Classless Inter-Domain Routing (CIDR) address, which describes both a server's IP address and the network architecture (routing path) surrounding that server."
},
{
"name": "FilePath",
"description": "A unique location in a file system."
},
{
"name": "Mutex",
"description": "The name of a mutex resource describing the execution architecture of a file."
},
{
"name": "CVE",
"description": "Common Vulnerability and Exposure (CVE) entry describing a software vulnerability that can be exploited to engage in malicious activity."
}]
json_normalize(indicator_types)
mtime = (datetime.now() - timedelta(days=1)).isoformat()
mtime
events = otx.getevents_since(mtime)
json_normalize(events)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's list a few pulses
Step2: author_name
Step3: _id
Step4: Besides receiving the pulse information, there is another function that can retrieve different events that are ocurring in the OTX system and affect your account.
|
7,663
|
<ASSISTANT_TASK:>
Python Code:
df = pd.read_csv('atlas-taggings.csv')
df[2:5]
articles = df[df.tagged_type == 'Article']
articles.tag_url = articles.tag_url.apply(get_tag_name)
articles = get_dummies_and_join(articles,'tag_url')
articles = articles.drop(['tag_id','tag_url','tagged_type','tagged_id'],axis=1)
articles = unique_article_set(articles,'tagged_url')
articles = articles.reset_index().set_index('tagged_url')
pageviews = pd.read_csv('output_articles_performance.csv',header=None,names=[
'url','published','pageviews'
])
pageviews.url = ['www.atlasobscura.com/articles/' + x for x in pageviews.url]
pageviews.describe()
pageviews.set_index('url',inplace=True)
article_set = articles.join(pageviews)
article_set['ten_thousand'] = target_pageview_cutoff(10000,article_set.pageviews)
article_set['published'] = pd.to_datetime(article_set['published'])
article_set['year'] = get_year(article_set,'published')
article_set.pageviews.plot(kind='density',title='Page View Distribution, All Articles')
ax = article_set.boxplot(column='pageviews',by='year',figsize=(6,6),showfliers=False)
ax.set(title='PV distribution by year of publication, no outliers',ylabel='pageviews')
sns.factorplot(
x='year',
y='ten_thousand',
data = article_set
)
total_tagged = get_total_tagged(article_set,'num_tagged')
article_set.fillna(value=0,inplace=True)
y = article_set.ten_thousand
X = article_set.drop(['pageviews','published','ten_thousand'],axis=1)
cross_val_score = get_cross_validation_score(X,y,linear_model.LogisticRegression(penalty = 'l1'),
n_folds=5)
lr = linear_model.LogisticRegression(penalty = 'l1').fit(X,y)
lr_scores = lr.predict_proba(X)[:,1]
roc_score = get_roc_scores(y,lr_scores)
print roc_score
single_tag_probabilities = get_probabilities(lr,X)
params = {'n_neighbors' : [x for x in range(2,100,4)],
'weights' : ['distance','uniform']}
gs = GridSearchCV(estimator = KNeighborsClassifier(),param_grid=params,
n_jobs=-1,cv=10,verbose=1)
gs.fit(X,y)
print gs.best_params_
print gs.best_score_
knn = gs.best_estimator_.fit(X,y)
knn_probs = get_probabilities(knn,X)
knn_cross_val_score = get_cross_validation_score(X,y,knn,5)
knn_scores = knn.predict_proba(X)[:,1]
knn_roc_score = get_roc_scores(y,knn_scores)
params_rfc = {'max_depth': np.arange(20,100,5),
'min_samples_leaf': np.arange(90,200,5),
'n_estimators': [20],
'criterion' : ['gini','entropy']
}
gs1 = GridSearchCV(RandomForestClassifier(),param_grid=params_rfc, cv=10, scoring='roc_auc',n_jobs=-1,verbose=1)
gs1.fit(X,y)
print gs1.best_params_
print gs1.best_score_
rf = gs1.best_estimator_
rf.fit(X,y)
rf_cross_val_score = get_cross_validation_score(X,y,rf,5)
rf_scores = rf.predict_proba(X)[:,1]
rf_roc_score = get_roc_scores(y,rf_scores)
print "Logistic Regression Cross-validation Score: ", cross_val_score
print "K Nearest Neighbors Cross-validation Score: ", knn_cross_val_score
print "RandomForest Cross-validation Score: ", rf_cross_val_score
print "Logistic Regressions ROC AUC score: ", roc_score
print "K Nearest Neighbors ROC AUC score: ", knn_roc_score
print "RandomForest ROC AUC score: ", rf_roc_score
url, taglist = get_article_tags('http://www.atlasobscura.com/articles/the-ao-exit-interview-12-years-in-the-blue-man-group')
transformed_article = transform_article_for_prediction(url,article_set)
article_set.head(1)
y1 = article_set[article_set.year >= 2016].ten_thousand
X1 = article_set[article_set.year >= 2016].drop(['pageviews','published','ten_thousand'],axis=1)
cross_val_score1 = get_cross_validation_score(X1,y1,linear_model.LogisticRegression(penalty = 'l1'),
n_folds=5)
lr1 = linear_model.LogisticRegression(penalty = 'l1').fit(X1,y1)
lr_scores1 = lr1.predict_proba(X1)[:,1]
roc_score1 = get_roc_scores(y1,lr_scores1)
print roc_score1
simplereach = pd.read_csv('~/Downloads/all-content-simplereach.csv')
simplereach.Url = simplereach.Url.apply(get_simplereach_url)
simplereach = simplereach.set_index('Url')
simplereach = simplereach[['Avg Engaged Time','Social Actions','Facebook Shares','FaceBook Referrals']]
article_set2 = article_set.join(simplereach['Facebook Shares'])
article_set2['five_hundred_shares'] = target_pageview_cutoff(500,article_set2['Facebook Shares'])
y2 = article_set2.five_hundred_shares
X2 = article_set2.drop(['pageviews',
'published',
'ten_thousand',
'Facebook Shares',
'five_hundred_shares'
],axis=1)
cross_val_score_social = get_cross_validation_score(X2,y2,linear_model.LogisticRegression(penalty = 'l1'),
n_folds=5)
lr_social = linear_model.LogisticRegression(penalty = 'l1').fit(X2,y2)
lr_scores_social = lr_social.predict_proba(X2)[:,1]
roc_score_social = get_roc_scores(y2,lr_scores_social)
print "Cross-val score when predicting Facebook shares > 500: ", cross_val_score_social
print "ROC AUC score when predicting Facebook shares > 500: ",roc_score_social
url = 'http://www.atlasobscura.com/articles/winters-effigies-the-deviant-history-of-the-snowman'
lr2.predict(transform_article_for_prediction(url,X2))
params_social = {'n_neighbors' : [x for x in range(2,100,4)],
'weights' : ['distance','uniform']}
gs_social = GridSearchCV(estimator = KNeighborsClassifier(),param_grid=params,
n_jobs=-1,cv=10,verbose=1)
gs_social.fit(X2,y2)
print gs_social.best_params_
print gs_social.best_score_
knn_social = gs_social.best_estimator_.fit(X2,y2)
knn_probs_social = get_probabilities(knn_social,X2)
knn_cross_val_score_social = get_cross_validation_score(X2,y2,knn_social,5)
knn_scores_social = knn_social.predict_proba(X2)[:,1]
knn_roc_score_social = get_roc_scores(y2,knn_scores_social)
params_rfc = {'max_depth': np.arange(20,100,5),
'min_samples_leaf': np.arange(90,200,5),
'n_estimators': [20]}
gs1_social = GridSearchCV(RandomForestClassifier(),param_grid=params_rfc, cv=10, scoring='roc_auc',n_jobs=-1,verbose=1)
gs1_social.fit(X2,y2)
rf_social = gs1_social.best_estimator_
rf_social.fit(X2,y2)
rf_cross_val_score_social = get_cross_validation_score(X2,y2,rf_social,5)
rf_scores_social = rf_social.predict_proba(X)[:,1]
rf_roc_score_social = get_roc_scores(y2,rf_scores_social)
print gs1_social.best_params_
print gs1_social.best_score_
print "Logistic Regression Cross-validation Score: ", cross_val_score_social
print "K Nearest Neighbors Cross-validation Score: ", knn_cross_val_score_social
print "RandomForest Cross-validation Score: ", rf_cross_val_score_social
print "Logistic Regressions ROC AUC score: ", roc_score_social
print "K Nearest Neighbors ROC AUC score: ", knn_roc_score_social
print "RandomForest ROC AUC score: ", rf_roc_score_social
np.mean(y)
simplereach.describe()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We only care about the content type "Article"
Step2: But we need to get the tag name out of the url string for the tag
Step3: Import the table of URLs and total pageviews
Step4: Now we will explore how a KNN classifier does with our dataset
Step5: Now we will explore how a RandomForest classifier does with our dataset
Step6: Prediction of a given URL
Step7: Refining the model
Step8: Now we will rebuild our model to have it predict if an article will receive over 500 Facebook shares.
Step9: Logistic Regression with FB Shares > 500 as target
Step10: KNN with FB Shares > 500 as target
Step11: RandomForest with FB Shares > 500 as target
|
7,664
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set_style('white')
fpvals = 'schubert-sb-table.txt'
fotu = 'data/cdi_schubert_results/RDP/cdi_schubert.otu_table.100.denovo.rdp_assigned'
fmeta = 'data/cdi_schubert_results/cdi_schubert.metadata.txt'
pvals = pd.read_csv(fpvals, sep=' ')
pvals.index = pvals['otu']
df = pd.read_csv(fotu, sep='\t', index_col=0).T
abundf = df.divide(df.sum(axis=1), axis=0)
meta = pd.read_csv(fmeta, sep='\t', index_col=0)
pvals.head()
plt.hist(pvals['unadjusted'])
df.head()
meta.head()
pvals.columns
pvals.sort_values(by='qvalue', ascending=False)[['pval', 'qvalue', 'ubiquity']].head(15)
# Tidyfy the OTU table
df.index.name = 'sample'
tidydf = df.reset_index().melt(id_vars='sample', var_name='otu', value_name='abun')
# Add disease state
tidydf = tidydf.join(meta['DiseaseState'], on='sample')
tidydf.head()
otus = pvals.sort_values(by='qvalue', ascending=False).index[0:12].tolist()
fig, ax = plt.subplots(3, 4, figsize=(14,12))
ax = ax.flatten()
for i in range(len(ax)):
o = otus[i]
sns.stripplot(
data=tidydf.query('otu == @o'),
x='DiseaseState', y='abun',
ax=ax[i],
jitter=True)
# Tidyfy the realtive abundance OTU table
abundf.index.name = 'sample'
tidyabundf = abundf.reset_index().melt(id_vars='sample', var_name='otu', value_name='abun')
# Add disease state
tidyabundf = tidyabundf.join(meta['DiseaseState'], on='sample')
fig, ax = plt.subplots(3, 4, figsize=(14,12))
ax = ax.flatten()
for i in range(len(ax)):
o = otus[i]
sns.stripplot(
data=tidyabundf.query('otu == @o'),
x='DiseaseState', y='abun',
ax=ax[i],
jitter=True)
kept_otus = pvals['otu'].tolist()
df_fromR = df.loc[:, kept_otus]
keep_dis = ['H', 'CDI', 'nonCDI']
df_fromR = df_fromR.loc[meta.query('DiseaseState == @keep_dis').index, :]
df_fromR.shape
# Hm, okay - maybe my problems are coming from
df_fromR.shape, df_fromR.dropna().shape
# Recalculate ubiquity with python
fig, ax = plt.subplots(2, 2, figsize=(10, 8))
ax = ax.flatten()
ax[0].hist((df_fromR.dropna() > 0).sum() / df_fromR.dropna().shape[0])
ax[0].set_title('Ubiquity calculated from python')
ax[1].hist(pvals['ubiquity'])
ax[1].set_title('Ubiquity calculated from R')
ax[2].hist(np.log10((df_fromR.dropna() > 0).sum() / df_fromR.dropna().shape[0]))
ax[2].set_title('Ubiquity calculated from python')
ax[3].hist(np.log10(pvals['ubiquity']))
ax[3].set_title('Ubiquity calculated from R')
np.log10(0.02)
pvals.columns
pcols = [u'bonf', u'bh',
u'qvalue', u'ihw-a10',
u'bl-df03', u'lfdr']
fig, ax = plt.subplots(2, 3, figsize=(14, 8))
ax = ax.flatten()
i = 0
for c in pcols:
ax[i].scatter(pvals['unadjusted'], pvals[c])
ax[i].set_title(c)
if i > 2:
ax[i].set_xlabel('unadjusted')
if i in [0, 3]:
ax[i].set_ylabel('corrected')
i += 1
fig, ax = plt.subplots(1, 4, figsize=(14, 4))
pcols = ['unadjusted', 'ihw-a10', 'bl-df03', 'lfdr']
i = 0
for c in pcols:
ax[i].scatter(pvals['qvalue'], pvals[c])
ax[i].set_title(c)
ax[i].axhline(0.1)
ax[i].set_xlabel('qvalue')
i += 1
pvals.columns
pcols = ['qvalue', 'unadjusted', 'ihw-a10', 'bl-df03', 'lfdr']
fig, ax = plt.subplots(1, len(pcols), figsize=(15, 4))
i = 0
for c in pcols:
ax[i].scatter(pvals['effect_size'], pvals[c])
ax[i].set_title(c)
ax[i].set_xlabel('effect size')
i += 1
fig, ax = plt.subplots(1, len(pcols), figsize=(15, 4))
i = 0
for c in pcols:
ax[i].scatter(pvals['ubiquity'], -np.log10(pvals[c]))
ax[i].set_title(c)
ax[i].set_xlabel('ubiquity')
#ax[i]
i += 1
print('Ubiquity vs log10(pvalue)')
import numpy as np
fig, ax = plt.subplots(1, len(pcols), figsize=(15, 4))
i = 0
for c in pcols:
ax[i].scatter(pvals['ubiquity'].rank(), pvals[c])
ax[i].set_title(c)
ax[i].set_xlabel('ubiquity')
#ax[i]
i += 1
print('Rank ubiquity vs pvalue')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: It looks like OTUs with an uncorrected pvalue of 0.9 get smushed down to 0.08 with qvalue - this seems fishy!
Step2: So these are, for the most part, singletons, maybe?
Step3: Hm, ok - so it looks like they're the same. I didn't mess up the ubiquity calculation...
Step4: Uncorrected pvalue vs. corrected pvalue?
Step5: Y-axis is the corrected qvalue (specified in subplot title), x-axis is the original pvalue
Step6: How does effect size correlate with pvalue and adjusted pvals?
Step7: And covariate?
|
7,665
|
<ASSISTANT_TASK:>
Python Code:
# This is our distribution (we assume gaussian)
data = np.random.normal(240, 25, 1000)
# we want to calculate the 95% CI (alpha = 0.95)
alpha = 0.95
# our data
# NOTE(review): this re-draws the sample, making the assignment above
# redundant -- confirm which draw was intended.
data = np.random.normal(240, 25, 1000)
print "The sample mean is: ", data.mean()
# Bootstrap the sample mean: resample (with replacement) 80% of the data
# 1000 times and record each resample's mean.
n_size = int(len(data) * 0.80)
means = []
for iteration in range(0, 1000):
    data_bootstrap = resample(data, n_samples=n_size)
    means.append(data_bootstrap.mean())
# Confidence interval = the (2.5th, 97.5th) percentiles of the bootstrap
# distribution of the mean.
p = ((1.0-alpha)/2.0) * 100
lower = np.percentile(means, p)
p = (alpha+((1.0-alpha)/2.0)) * 100
upper = np.percentile(means, p)
print('%.1f confidence interval %.1f%% and %.1f%%' % (alpha*100, lower, upper))
# Plot the histogram of bootstrap means with the mean and CI bounds marked.
m = np.array(means).mean()
_ = plt.hist(means)
_ = plt.axvline(x=m, color='red')
_ = plt.axvline(x=lower, color='red')
_ = plt.axvline(x=upper, color='red');
# Plot the histogram of the original data with the same CI lines.
_ = plt.hist(data)
_ = plt.axvline(x=m, color='red')
_ = plt.axvline(x=lower, color='red')
_ = plt.axvline(x=upper, color='red');
# Same interval via the t-distribution: this is the wanted confidence level.
CL = 0.95
# we calculate the degrees of freedom
N = len(data)
dof = N-1 # degrees of freedom ~ N for big numbers.
# we use a t-distribution table (here from scipy)
# ppf: percent point function (inverse of cdf)
# NOTE(review): ppf((1-CL)/2) returns the *negative* tail value, so h is
# negative and m+h / m-h are the lower / upper bounds respectively.
t_table_val = scipy.stats.t.ppf((1-CL)/2., dof)
m = np.mean(data)
h = np.std(data)/math.sqrt(N) * t_table_val
print t_table_val, np.std(data)/math.sqrt(N) , h, m-h, m+h
_ = plt.hist(data)
_ = plt.axvline(x=m, color='red')
_ = plt.axvline(x=m-h, color='red'); _ = plt.axvline(x=m+h, color='red')
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import summary_table
from statsmodels.sandbox.regression.predstd import wls_prediction_std
nmuestra = 100
x = np.linspace(0, 10, nmuestra)
e = np.random.normal(size=nmuestra)
y = 1 + 0.5*x + 2*e
X = sm.add_constant(x)
re = sm.OLS(y, X).fit()
st, data, ss2 = summary_table(re, alpha=0.05)
prstd, iv_l, iv_u = wls_prediction_std(re)
fittedvalues = data[:,2]
predict_mean_se = data[:,3]
predict_mean_ci_low, predict_mean_ci_upp = data[:,4:6].T
predict_ci_low, predict_ci_upp = data[:,6:8].T
# check we got the right things
print np.max(np.abs(re.fittedvalues - fittedvalues))
print np.max(np.abs(iv_l - predict_ci_low))
print np.max(np.abs(iv_u - predict_ci_upp))
plt.plot(x, y, 'o')
plt.plot(x, fittedvalues, '-', lw=2)
plt.plot(x, predict_ci_low, 'r--', lw=2)
plt.plot(x, predict_ci_upp, 'r--', lw=2)
plt.plot(x, predict_mean_ci_low, 'r--', lw=2)
plt.plot(x, predict_mean_ci_upp, 'r--', lw=2)
plt.show()
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score
# The typicall case where we calculate the scores (without any confidence intervals)
iris=datasets.load_iris()
x_train, x_test, y_train, y_test=train_test_split(iris.data,iris.target,test_size=0.33)
clf=KNeighborsClassifier(n_neighbors=3).fit(x_train, y_train)
a = accuracy_score(y_test, clf.predict(x_test))
p = precision_score(y_test, clf.predict(x_test), average='weighted')
r = recall_score(y_test, clf.predict(x_test), average='weighted')
print "accuracy: ",a, " precision: ", p, " recall: ", r
df = pd.DataFrame(data= np.c_[iris['data'], iris['target']],
columns= iris['feature_names'] + ['target'])
df.head(2)
# now we ll calculate the scores adding the CI using bootstrap
values = df.values
# configure bootstrap
n_iterations = 100
n_size = int(len(data) * 0.50)
# run bootstrap
stats = list()
for i in range(n_iterations):
try:
# prepare train and test sets
train = resample(values,replace=True, n_samples=n_size)
test = np.array([x for x in values if x.tolist() not in train.tolist()])
# fit model
model = KNeighborsClassifier(n_neighbors=5)
model.fit(train[:, :-1], train[:,-1])
a = accuracy_score(test[:,-1], model.predict(test[:,:-1]))
stats.append(a)
except:
pass
# plot scores
pyplot.hist(stats)
pyplot.show()
# confidence intervals
alpha = 0.95
p = ((1.0-alpha)/2.0) * 100
lower = max(0.0, np.percentile(stats, p))
p = (alpha+((1.0-alpha)/2.0)) * 100
upper = min(1.0, np.percentile(stats, p))
print('%.1f confidence interval %.1f%% and %.1f%%' % (alpha*100, lower*100, upper*100))
_ = plt.hist(stats)
_ = plt.axvline(x=np.array(stats).mean(), color='red')
_ = plt.axvline(x=lower, color='red'); _ = plt.axvline(x=upper, color='red')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: - using Bootstrap
Step2: - using t-distribution table
Step3: Use-Case
Step4: Use-Case
|
7,666
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """Create lookup tables for vocabulary.

    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    from collections import Counter

    # Count word frequencies so the most common words get the lowest ids.
    word_counts = Counter(text)
    # Sort the vocabulary from most to least frequent.
    sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
    # word -> integer id
    vocab_to_int = {word: ii for ii, word in enumerate(sorted_vocab)}
    # integer id -> word (inverse mapping)
    int_to_vocab = {ii: word for word, ii in vocab_to_int.items()}
    return vocab_to_int, int_to_vocab
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_create_lookup_tables(create_lookup_tables)
def token_lookup():
    """Generate a dict to turn punctuation into a token.

    Tokenising punctuation keeps e.g. "bye!" and "bye" from becoming two
    different vocabulary entries.

    :return: Tokenize dictionary where the key is the punctuation and the
             value is the token
    """
    return {
        '.': '||Period||',
        ',': '||Comma||',
        '"': '||Quotation_Mark||',
        ';': '||Semicolon||',
        '!': '||Exclamation_Mark||',
        '?': '||Question_Mark||',
        '(': '||Left_Parentheses||',
        ')': '||Right_Parentheses||',
        '--': '||Dash||',
        '\n': '||Return||',
    }
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_tokenize(token_lookup)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
    """Create TF placeholders for input, targets, and learning rate.

    :return: Tuple (input, targets, learning rate)
    """
    # [None, None] -> (batch size, sequence length), both decided at run time.
    # Fixed tensor names allow the placeholders to be fetched from a loaded graph.
    inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name='input')
    targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name='targets')
    lr = tf.placeholder(dtype=tf.float32, shape=None, name='learning_rate')
    return inputs, targets, lr
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_inputs(get_inputs)
def get_init_cell(batch_size, rnn_size):
    """Create an RNN cell and initialize it.

    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs (number of units in each LSTM cell)
    :return: Tuple (cell, initialize state)
    """
    rnn_layers = 1  # number of stacked LSTM layers

    # Basic LSTM cell, stacked into a (single-layer) MultiRNNCell.
    # NOTE(review): `[lstm] * rnn_layers` would reuse the *same* cell object
    # if rnn_layers > 1; fine here only because rnn_layers == 1.
    lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    cell = tf.contrib.rnn.MultiRNNCell([lstm] * rnn_layers)

    # Zero initial state, exposed under a fixed name so it can be fetched
    # from the graph later (e.g. when generating text).
    initial_state = cell.zero_state(batch_size, tf.float32)
    initial_state = tf.identity(initial_state, name='initial_state')
    return cell, initial_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
    """Create embedding for ``input_data``.

    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Embedding matrix initialised uniformly in [-1, 1).
    embedding = tf.Variable(
        tf.random_uniform([vocab_size, embed_dim], minval=-1, maxval=1))
    # Look up the embedding vector for every word id in input_data.
    return tf.nn.embedding_lookup(embedding, input_data)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
    """Create a RNN using a RNN Cell.

    :param cell: RNN Cell
    :param inputs: Input text data (embedded word vectors)
    :return: Tuple (Outputs, Final State)
    """
    # Let dynamic_rnn create its own zero initial state (initial_state=None);
    # dtype must then be supplied explicitly.
    outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the final state so it can be fetched from a loaded graph.
    final_state = tf.identity(final_state, name='final_state')
    return outputs, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_rnn(build_rnn)
def build_nn(cell, rnn_size, input_data, vocab_size):
    """Build part of the neural network.

    :param cell: RNN cell
    :param rnn_size: Size of rnns (also used as the embedding dimension)
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :return: Tuple (Logits, FinalState)
    """
    # Embed the word ids, run them through the RNN, then project each
    # timestep onto the vocabulary with a linear (no activation) layer.
    embed = get_embed(input_data, vocab_size, rnn_size)
    outputs, final_state = build_rnn(cell, embed)
    logits = tf.contrib.layers.fully_connected(
        outputs, num_outputs=vocab_size, activation_fn=None)
    return logits, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
    """Return batches of input and target sequences.

    Targets are the inputs shifted forward by one word (next-word
    prediction, seq2seq style).

    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch (number of sequences per batch)
    :param seq_length: The length of sequence
    :return: Numpy array of shape (num_batches, 2, batch_size, seq_length),
             where index 0/1 on the second axis selects inputs/targets
    """
    # Number of words consumed by one full batch.
    words_per_batch = batch_size * seq_length
    num_batches = len(int_text) // words_per_batch
    # Targets need one extra trailing word; when the text divides evenly
    # there is no extra word, so drop the last batch.
    if len(int_text) % words_per_batch == 0:
        num_batches -= 1
    num_batches = max(num_batches, 0)  # guard against empty/too-short input

    batches = np.zeros([num_batches, 2, batch_size, seq_length])
    # Keep only the words that fit into full batches, plus one extra word
    # for the final shifted target.
    int_text = int_text[:num_batches * words_per_batch + 1]
    for batch_idx in range(num_batches):
        # Contiguous slice for this batch, with one extra word for the shift.
        batch_slice = int_text[batch_idx * words_per_batch:
                               (batch_idx + 1) * words_per_batch + 1]
        for seq_idx in range(batch_size):
            start = seq_idx * seq_length
            batches[batch_idx, 0, seq_idx] = batch_slice[start:start + seq_length]
            batches[batch_idx, 1, seq_idx] = batch_slice[start + 1:start + seq_length + 1]
    return batches
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_batches(get_batches)
# Number of Epochs
num_epochs = 100 # depends on how fast the system is and how long we can wait to see the results
# Batch Size
batch_size = 64 # depends on the memory, num seq per batch
# RNN Size
rnn_size = 128 # Pixel and int/8 Bit
# Sequence Length
seq_length = 64 # the same as RNN width size/ number of mem cells in one layer
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 16 # 2^4 show every 16 batches learning/training
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
save_dir = './save'
DON'T MODIFY ANYTHING IN THIS CELL
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
def get_tensors(loaded_graph):
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
# TODO: Implement Function
input = loaded_graph.get_tensor_by_name(name='input:0')
initial_state = loaded_graph.get_tensor_by_name(name='initial_state:0')
final_state = loaded_graph.get_tensor_by_name(name='final_state:0')
probs = loaded_graph.get_tensor_by_name(name='probs:0')
return input, initial_state, final_state, probs
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_tensors(get_tensors)
def pick_word(probabilities, int_to_vocab):
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
# TODO: Implement Function
# extractin the string/words out of the int_to_vocab.items
words = np.array([words for ids, words in int_to_vocab.items()])
# The generated random samples = numpy.random.choice(a, size=None, replace=True, p=None)¶
random_word = np.random.choice(a = words, size=None, replace=True, p=probabilities)
return random_word
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_pick_word(pick_word)
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TV Script Generation
Step3: Explore the Data
Step6: Implement Preprocessing Functions
Step9: Tokenize Punctuation
Step11: Preprocess all the data and save it
Step13: Check Point
Step15: Build the Neural Network
Step18: Input
Step21: Build RNN Cell and Initialize
Step24: Word Embedding
Step27: Build RNN
Step30: Build the Neural Network
Step33: Batches
Step35: Neural Network Training
Step37: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Implement Generate Functions
Step49: Choose Word
Step51: Generate TV Script
|
7,667
|
<ASSISTANT_TASK:>
Python Code:
df = pd.read_csv("311-2014.csv", nrows=200000, low_memory = False)
df.head(3)
df.columns
type(df['Created Date'][0])
print(df['Created Date'][0])
dateutil.parser.parse(df['Created Date'][0])
def str_to_time(str_date):
datetype_date = dateutil.parser.parse(str_date)
return datetype_date
df['created_date'] = df['Created Date'].apply(str_to_time)
df.index=df['created_date']
df.head(3)
df.groupby("Complaint Type").count()['Unique Key'].sort_values(ascending=False)
plt.style.use('ggplot')
ax = df.groupby("Complaint Type").count()['Unique Key'].sort_values(ascending=True).tail(5).plot(kind='barh', figsize=(10,7))
ax.set_title("5 most frequent complaint types")
ax.set_xlabel("How many times was it filed?")
df.groupby('Borough').count()['Unique Key'].sort_values(ascending=False)
#https://en.wikipedia.org/wiki/Brooklyn
inhabitants=[{'state': "BROOKLYN", 'inhabitants':2621793},
{'state': "QUEENS", 'inhabitants': 2321580},
{'state': "MANHATTAN", 'inhabitants': 1636268},
{'state': "BRONX", 'inhabitants': 1438159},
{'state': "STATEN ISLAND", 'inhabitants': 473279},]
inhabitantsdf=pd.DataFrame(inhabitants)
inhabitantsdf
print("Complaints per capita: Brooklyn", 57129/2621793)
print("Complaints per capita: Queens", 46824/2321580)
print("Complaints per capita: Manhattan", 42050/1636268)
print("Complaints per capita: Bronx", 29610/1438159)
print("Complaints per capita: Staten Island", 7387/473279)
march_cases = df["2015-03"]['Unique Key'].count()
may_cases = df["2015-05"]['Unique Key'].count()
print("Cases filed in March:", march_cases)
print("Cases filed in May:", may_cases)
df["2015-04-01"]
df["2015-04-01"].groupby('Complaint Type').count()["Unique Key"].sort_values(ascending=False).head(1)
df["2015-04-01"].groupby('Complaint Type').count()["Unique Key"].sort_values(ascending=False).head(3)
ax = df.resample('m')['Unique Key'].count().plot(figsize=(10,5))
ax.set_title("Reports filed per month")
ax.set_ylabel("Number of complaints")
ax.set_xlabel("Month")
df.resample('m')['Unique Key'].count().max()
print("The month with most cases is May with",df.resample('m')['Unique Key'].count().max(), "cases.")
ax = df.resample('W')['Unique Key'].count().plot(figsize=(10,5))
ax.set_title("Reports filed per week")
ax.set_ylabel("Number of complaints")
ax.set_xlabel("Week")
# weeknumbers= list(range(1,52))
# ax.set_xticks(weeknumbers)
print("The week with most cases is the second in May with",df.resample('W')['Unique Key'].count().max(), "cases.")
noise_df = df[df["Complaint Type"].str.contains("Noise ")]
noiseax = noise_df.resample('W')['Unique Key'].count().plot(figsize=(10,5))
noiseax.set_title("Noise complaints filed over the year")
noiseax.set_ylabel("Number of noise-related complaints")
noiseax.set_xlabel("Time of year")
noisedayax = noise_df.groupby(by=noise_df.index.hour)['Unique Key'].count().plot(figsize=(10,5))
noisedayax.set_title("Hour when noise complaints are filed")
noisedayax.set_ylabel("Number of noise complaints")
noisedayax.set_xticks([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23])
noisedayax.set_xlabel("Hour of the day")
df.resample('D')['Unique Key'].count().sort_values(ascending=False).head(5)
df.resample('D')['Unique Key'].count().sort_values().tail(5).plot(kind='barh')
df.groupby(by=df.index.hour)['Unique Key'].count()
ax = df.groupby(by=df.index.hour)['Unique Key'].count().plot(figsize=(10,5))
ax.set_title("Hour when complaints are filed")
ax.set_ylabel("Number of complaints")
ax.set_xticks([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23])
ax.set_xlabel("Hour of the day")
# Midnight is is an outlier, complaint-number-wise
hourly_df = pd.DataFrame(df.groupby(df.index.hour)['Complaint Type'].value_counts())
hourly_df
#Most common complaint types at midnight
hourly_df['Complaint Type'][0].head(5)
#Most common complaint types the hour before midnight
hourly_df['Complaint Type'][23].head(5)
#Most common complaint types the hour after midnight
hourly_df['Complaint Type'][1].head(5)
midnight_df = df[df.index.hour==0]
minutely_df = midnight_df.groupby(by=midnight_df.index.minute)
minax = minutely_df['Unique Key'].count().plot(figsize=(15,5))
minax.set_title("Complaints filed per minute during midnight hour")
minax.set_xlabel("Minutes of the hour")
minax.set_ylabel("Numbers of complaints filed")
minax.set_xticks([0,5,10,15,20,25,30,35,40,45,50,55,60])
midnight_df['Agency'].value_counts().sort_values(ascending=False).head(5)
#Write test code for first agency
hpd_df = df[df['Agency'] == 'HPD']
ax = hpd_df.groupby(by=hpd_df.index.hour).count()['Unique Key'].plot(figsize=(12,7), label= 'HPD', legend=True, linewidth=2)
ax.set_title("Complaints per agency and time of the day")
ax.set_xlabel("Hour of the day")
ax.set_xticks([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23])
ax.set_ylabel("Number of complaints filed")
#Turn working code into a function:
def complaints_by_agency(agency_name):
complaints_agency = df[df['Agency'] == agency_name]
return complaints_agency.groupby(by=complaints_agency.index.hour).count()['Unique Key']
#run code for remaining agencies
for agency in ['NYPD', 'DOHMH', 'TLC', 'DOT']:
complaints_by_agency(agency).plot(ax=ax, label = agency, legend=True, linewidth=2)
#Write test code for first agency
#Copied code from above replacing groupby by resample('W')
hpd_df = df[df['Agency'] == 'HPD']
ax = hpd_df.resample('W')['Unique Key'].count().plot(figsize=(12,7), label= 'HPD', legend=True, linewidth=2)
ax.set_title("Complaints per agency and time of the year")
ax.set_xlabel("Week of the year")
ax.set_ylabel("Number of complaints filed")
#Turn working code into a function:
def agency_complaints_weekly(agency_name):
complaints_weekly = df[df['Agency'] == agency_name]
return complaints_weekly.resample('W')['Unique Key'].count()
#run code for remaining agencies
for agency in ['NYPD', 'DOHMH', 'TLC', 'DOT']:
agency_complaints_weekly(agency).plot(ax=ax, label = agency, legend=True, linewidth=2)
def ag_complaints(agency_name, str_date_s, str_date_e):
newdf = df[df['Agency'] == agency_name]
newdf.resample('M')
return newdf[str_date_s : str_date_e].groupby("Complaint Type")['Unique Key'].count().sort_values(ascending=False).head(10)
print("NYPD most popular complaints in July and August:")
ag_complaints('NYPD', '2015-07', '2015-08')
print("NYPD most popular complaints in May:")
ag_complaints('NYPD', '2015-05', '2015-05')
print("Most common complaints filed at the Housing Preservation Bureau (HPD) in winter")
ag_complaints('HPD', '2015-11', '2016-02')
print("Most common complaints filed at the Housing Preservation Bureau (HPD) in summer")
ag_complaints('HPD', '2015-05', '2016-09')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What was the most popular type of complaint, and how many times was it filed?
Step2: Make a horizontal bar graph of the top 5 most frequent complaint types.
Step3: Which borough has the most complaints per capita? Since it's only 5 boroughs, you can do the math manually.
Step4: Manhattan has the most complaints per capita.
Step5: I'd like to see all of the 311 complaints called in on April 1st.
Step6: What was the most popular type of complaint on April 1st?
Step7: What were the most popular three types of complaint on April 1st
Step8: What month has the most reports filed? How many? Graph it.
Step9: What week of the year has the most reports filed? How many? Graph the weekly complaints.
Step10: Noise complaints are a big deal. Use .str.contains to select noise complaints, and make an chart of when they show up annually. Then make a chart about when they show up every day (cyclic).
Step11: Which were the top five days of the year for filing complaints? How many on each of those days? Graph it.
Step12: What hour of the day are the most complaints? Graph a day of complaints.
Step13: One of the hours has an odd number of complaints. What are the most common complaints at that hour, and what are the most common complaints the hour before and after?
Step14: So odd. What's the per-minute breakdown of complaints between 12am and 1am? You don't need to include 1am.
Step15: Looks like midnight is a little bit of an outlier. Why might that be? Take the 5 most common agencies and graph the times they file reports at (all day, not just midnight).
Step16: Graph those same agencies on an annual basis - make it weekly. When do people like to complain? When does the NYPD have an odd number of complaints?
Step17: NYPD has an odd number after the first week in May
|
7,668
|
<ASSISTANT_TASK:>
Python Code:
g_dataset_name = "Notebook4Test"
g_fastq_counts_run_prefix = "TestSet4"
g_fastq_counts_dir = '~/dual_crispr/test_data/test_set_4'
g_collapsed_counts_run_prefix = ""
g_collapsed_counts_dir = '~/dual_crispr/test_outputs/test_set_4'
g_combined_counts_dir = ""
g_combined_counts_run_prefix = ""
import inspect
import ccbb_pyutils.analysis_run_prefixes as ns_runs
import ccbb_pyutils.files_and_paths as ns_files
import ccbb_pyutils.notebook_logging as ns_logs
def describe_var_list(input_var_name_list):
description_list = ["{0}: {1}\n".format(name, eval(name)) for name in input_var_name_list]
return "".join(description_list)
ns_logs.set_stdout_info_logger()
g_fastq_counts_dir = ns_files.expand_path(g_fastq_counts_dir)
g_collapsed_counts_run_prefix = ns_runs.check_or_set(g_collapsed_counts_run_prefix,
ns_runs.generate_run_prefix(g_dataset_name))
g_collapsed_counts_dir = ns_files.expand_path(ns_runs.check_or_set(g_collapsed_counts_dir, g_fastq_counts_dir))
g_combined_counts_run_prefix = ns_runs.check_or_set(g_combined_counts_run_prefix, g_collapsed_counts_run_prefix)
g_combined_counts_dir = ns_files.expand_path(ns_runs.check_or_set(g_combined_counts_dir, g_collapsed_counts_dir))
print(describe_var_list(['g_fastq_counts_dir','g_collapsed_counts_run_prefix','g_collapsed_counts_dir',
'g_combined_counts_run_prefix','g_combined_counts_dir']))
ns_files.verify_or_make_dir(g_collapsed_counts_dir)
ns_files.verify_or_make_dir(g_combined_counts_dir)
import dual_crispr.construct_counter as ns_counter
print(inspect.getsource(ns_counter.get_counts_file_suffix))
import dual_crispr.count_combination as ns_combine
print(inspect.getsource(ns_combine))
print(ns_files.summarize_filenames_for_prefix_and_suffix(g_fastq_counts_dir, g_fastq_counts_run_prefix,
ns_counter.get_counts_file_suffix()))
ns_combine.write_collapsed_count_files(g_fastq_counts_dir, g_collapsed_counts_dir, g_collapsed_counts_run_prefix,
g_fastq_counts_run_prefix, ns_counter.get_counts_file_suffix(),
ns_combine.get_collapsed_counts_file_suffix())
print(ns_files.check_file_presence(g_collapsed_counts_dir, g_collapsed_counts_run_prefix,
ns_combine.get_collapsed_counts_file_suffix(),
check_failure_msg="Count combination failed to produce collapsed count file(s)."))
ns_combine.write_combined_count_file(g_collapsed_counts_dir, g_combined_counts_dir, g_collapsed_counts_run_prefix,
g_combined_counts_run_prefix, ns_combine.get_collapsed_counts_file_suffix(),
ns_combine.get_combined_counts_file_suffix())
print(ns_files.check_file_presence(g_combined_counts_dir, g_collapsed_counts_run_prefix,
ns_combine.get_collapsed_counts_file_suffix(),
check_failure_msg="Count combination failed to produce combined count file."))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step 4
Step1: Automated Set-Up
Step2: Count Combination Functions
Step3: Input Count Filenames
Step4: Count Combination Execution
|
7,669
|
<ASSISTANT_TASK:>
Python Code:
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import random
import copy
from Bio.PDB import *
from IPython.display import HTML, display
import tabulate
from __future__ import division
from IPython.display import Image
## Initialization
genes = 25
tfbs_pool = 10
tfbs_per_gene = 3
gene_pool = {}
for g in range(genes):
gene_pool[g] = random.sample(range(tfbs_pool), tfbs_per_gene)
steps_of_evolution = 100
p_gene_dublication = 0.05
p_gene_deletion = 0.25
p_tfbs_dublication = 0.45
p_tfbs_deletion = 0.25
p_vector = [p_gene_dublication, p_gene_deletion, p_tfbs_dublication, p_tfbs_deletion]
filename = "network-" + str(genes) + "-" + str(tfbs_pool) + "-" + str(tfbs_per_gene)
filename = filename + "-" + str(p_vector[0]) + "-" + str(p_vector[1]) + "-"
filename = filename + str(p_vector[2])+ "-" + str(p_vector[3]) + ".gml"
## Evolution
for s in range(steps_of_evolution):
r = np.random.choice(len(p_vector), p = p_vector)
if r == 0:
print "Evolution Step : " + str (s) + " Gene Dublication"
gene_to_dublicate = random.sample(range(genes), 1)[0]
gene_pool[genes] = copy.deepcopy(gene_pool[gene_to_dublicate])
genes = genes + 1;
elif r == 1:
print "Evolution Step : " + str (s) + " Gene Deletion"
gene_to_delete = random.sample(range(genes), 1)[0]
for i in range(gene_to_delete, genes-1):
gene_pool[i] = copy.deepcopy(gene_pool[i+1])
gene_pool.pop(genes - 1)
genes = genes-1
if genes == 0:
print "Gene Pool Empty !"
break
elif r == 2:
# print "Evolution Step : " + str (s) + " TFBS Dublication"
tfbs_probability = np.array(range(0, tfbs_pool))
for g in gene_pool:
for value in gene_pool[g]:
tfbs_probability[value] = tfbs_probability[value]+1
tfbs_probability = tfbs_probability.astype(np.float)
tfbs_probability = tfbs_probability / np.sum(tfbs_probability)
tfbs_to_dublicate = np.random.choice(tfbs_pool, p = tfbs_probability)
flag = False
while not flag:
gene_target = np.random.choice(gene_pool.keys())
if tfbs_to_dublicate not in gene_pool[gene_target]:
gene_pool[gene_target].append(tfbs_to_dublicate)
flag = True
else:
# print "Evolution Step : " + str (s) + " TFBS Deletion"
gene_target = np.random.choice(gene_pool.keys())
tfbs_to_delete = np.random.choice(gene_pool[gene_target])
gene_pool[gene_target].remove(tfbs_to_delete)
if len(gene_pool[gene_target]) == 0:
gene_to_delete = gene_target
for i in range(gene_to_delete, genes-1):
gene_pool[i] = copy.deepcopy(gene_pool[i+1])
gene_pool.pop(genes - 1)
genes = genes-1
if genes == 0:
print "Gene Pool Empty !"
break
## Building coevolution network
G = nx.Graph()
for g_a in gene_pool.keys():
for g_b in gene_pool.keys():
if not g_a == g_b:
if len(set(gene_pool[g_a]).intersection(gene_pool[g_b])) > 0:
G.add_edge(g_a, g_b)
nx.write_gml(G, 'gml files/' + filename)
gene_pool
print "Genes : 15"
print "TFBS : 5"
print "TFBS per gene : 2"
print "p_gene_dublication, p_gene_deletion, p_tfbs_dublication, p_tfbs_deletion"
print "0.25, 0.25, 0.25, 0.25"
Image("gml files/network-15-5-2-0.25-0.25-0.25-0.25.gml.png")
print "Genes : 15"
print "TFBS : 50"
print "TFBS per gene : 2"
print "p_gene_dublication, p_gene_deletion, p_tfbs_dublication, p_tfbs_deletion"
print "0.25, 0.25, 0.25, 0.25"
Image("gml files/network-15-50-2-0.25-0.25-0.25-0.25.gml.png")
print "Genes : 25"
print "TFBS : 10"
print "TFBS per gene : 3"
print "p_gene_dublication, p_gene_deletion, p_tfbs_dublication, p_tfbs_deletion"
print "0.25, 0.25, 0, 0.5"
Image("gml files/network-25-10-3-0.25-0.25-0-0.5.gml.png")
plt.subplot(211)
G1 = nx.read_gml('gml files/network-25-10-3-0.05-0.25-0.45-0.25.gml')
plt.hist(G1.degree().values())
plt.title("Gene Duplication < < TFBS Duplication")
plt.show()
plt.subplot(212)
G2 = nx.read_gml('gml files/network-25-10-3-0.49-0.01-0.49-0.01.gml')
plt.hist(G2.degree().values())
plt.title("Gene Duplication ~ TFBS Duplication")
plt.show()
def get_RIG(coordinates, labels, cut_off):
this function computes residue interaction graphs
RIG = nx.Graph()
label_ids = range(len(labels))
RIG.add_nodes_from(label_ids)
for i in label_ids:
for j in label_ids:
if not i == j:
if np.linalg.norm(coordinates[i] - coordinates[j]) < 7:
RIG.add_edge(i, j)
return RIG
def get_LIN(RIG, threshold):
this function computes long range network
LIN = nx.Graph(RIG)
for e in LIN.edges():
if not abs(e[0] - e[1]) == 1:
if abs(e[0] - e[1]) < threshold:
LIN.remove_edge(e[0], e[1])
return LIN
RIG_CUTOFF = 7
LIN_THRESHOLD = 12
parser = PDBParser()
pdb_files = ['1csp.pdb', '1hrc.pdb', '1pks.pdb', '2abd.pdb','3mef.pdb']
RIG = []
LIN = []
for pdb_file in pdb_files:
structure = parser.get_structure('pdb_file', 'pdb files/' + pdb_file)
coordinates = []
labels = []
for model in structure:
for chain in model:
for residue in chain:
try:
coordinates.append(residue['CA'].get_coord())
labels.append(residue.get_resname())
except KeyError:
pass
RIG.append(get_RIG(coordinates, labels, RIG_CUTOFF))
LIN.append(get_LIN(RIG[-1], LIN_THRESHOLD))
break ## working on chain id A only
break ## Working on model id 0 only
output = [['PBD ID', 'Nodes', 'Edges (RIG)', 'L (RIG)', 'C (RIG)', 'Edges (LIN)', 'L (LIN)', 'C (LIN)']]
for i in range(len(pdb_files)):
append_list = [pdb_files[i], RIG[i].number_of_nodes(), RIG[i].number_of_edges()]
append_list.append(nx.average_shortest_path_length(RIG[i]))
append_list.append(nx.average_clustering(RIG[i]))
append_list.append(LIN[i].number_of_edges())
append_list.append(nx.average_shortest_path_length(LIN[i]))
append_list.append(nx.average_clustering(LIN[i]))
output.append(append_list)
display(HTML(tabulate.tabulate(output, tablefmt='html')))
def get_Bartoli_RIG_Model(nodes, edges):
this function computes bartoli's model of residue interaction graphs
Bartoli_RIG_Model = nx.Graph()
Bartoli_RIG_Model.add_nodes_from(range(nodes))
# adding backbone chain
Bartoli_RIG_Model.add_path(range(nodes))
# making other links
d = {} # dictionary key: absolute difference, values: possible pairs
for i in range(nodes):
for j in range(nodes):
if abs(i-j) in d:
d[abs(i-j)].append((i, j))
else:
d[abs(i-j)] = [(i, j)]
del(d[0]) # not required
del(d[1]) # already handled in backbone
p = np.asarray([len(x) for x in d.values()]).astype(np.float)
p = p/np.sum(p)
while not nx.number_of_edges(Bartoli_RIG_Model) > edges:
x = random.choice(d[np.random.choice(d.keys())])
# np.random.choice(d[np.random.choice(d.keys(), p)])
Bartoli_RIG_Model.add_edge(x[0]-1, x[1]-1)
Bartoli_RIG_Model.add_edge(x[0]-1, x[1])
Bartoli_RIG_Model.add_edge(x[0]-1, x[1]+1)
Bartoli_RIG_Model.add_edge(x[0], x[1]-1)
Bartoli_RIG_Model.add_edge(x[0], x[1])
Bartoli_RIG_Model.add_edge(x[0], x[1]+1)
Bartoli_RIG_Model.add_edge(x[0]+1, x[1]-1)
Bartoli_RIG_Model.add_edge(x[0]+1, x[1])
Bartoli_RIG_Model.add_edge(x[0]+1, x[1]+1)
return Bartoli_RIG_Model
## Bartoli's model for protein contact map models.
Bartoli_RIG_Model = []
for rig in RIG:
nodes = nx.number_of_nodes(rig)
edges = nx.number_of_edges(rig)
Bartoli_RIG_Model.append(get_Bartoli_RIG_Model(nodes, edges))
output = [['PBD ID', 'Nodes', 'Edges (RIG)', 'L (RIG)', 'C (RIG)', 'Edges (Bartoli)', 'L (Bartoli)', 'C (Bartoli)']]
for i in range(len(pdb_files)):
append_list = [pdb_files[i], RIG[i].number_of_nodes(), RIG[i].number_of_edges()]
append_list.append(nx.average_shortest_path_length(RIG[i]))
append_list.append(nx.average_clustering(RIG[i]))
append_list.append(Bartoli_RIG_Model[i].number_of_edges())
append_list.append(nx.average_shortest_path_length(Bartoli_RIG_Model[i]))
append_list.append(nx.average_clustering(Bartoli_RIG_Model[i]))
output.append(append_list)
display(HTML(tabulate.tabulate(output, tablefmt='html')))
x = []
for e in Bartoli_RIG_Model[0].edges():
x.append(np.linalg.norm(e))
plt.hist(x, bins = 30)
plt.xlabel('cartesian distance amond amino acids')
plt.ylabel('number of amino acid contacts made')
plt.show()
def display_graph(G):
print "Nodes : " + str(G.number_of_nodes())
print "Edges : " + str(G.number_of_edges())
density = G.number_of_edges() / (G.number_of_nodes()*G.number_of_nodes()/2)
print "Sparseness : " + str(1-density)
try:
print "Characteristic Path Length (L) : " + str(nx.average_shortest_path_length(G))
except Exception as e:
print "Characteristic Path Length (L) : " + str(e)
print "Average Clustering Coefficient (C) : " + str(nx.average_clustering(G))
nx.draw_networkx(G)
plt.title('Network Layout')
plt.show()
plt.hist(G.degree()[0])
plt.title('Degree distribution')
plt.show()
n_protein = 100
n_protein_domains = 50
domain_per_protein = 3
protein_domains = range(n_protein_domains)
protein = np.asmatrix([random.sample(range(n_protein_domains), domain_per_protein) for x in range(n_protein)])
G_odd_odd = nx.Graph()
G_odd_odd.add_nodes_from(range(n_protein))
for x in range(n_protein):
for y in range(n_protein):
if not x == y:
if np.any(protein[x,] % 2) and np.any(protein[y,] % 2):
G_odd_odd.add_edge(x, y)
print "ODD-ODD PIN"
print "~~~~~~~~~~~"
print ""
display_graph(G_odd_odd)
G_even_even = nx.Graph()
G_even_even.add_nodes_from(range(n_protein))
for x in range(n_protein):
for y in range(n_protein):
if not x == y:
if not np.all(protein[x,] % 2) and not np.all(protein[y,] % 2):
G_even_even.add_edge(x, y)
print "EVEN-EVEN PIN"
print "~~~~~~~~~~~~~"
print ""
display_graph(G_even_even)
G_odd_even = nx.Graph()
G_odd_even.add_nodes_from(range(n_protein))
for x in range(n_protein):
for y in range(n_protein):
if not x == y:
if (np.any(protein[x,] % 2) and not np.all(protein[y,] % 2)) or (not np.all(protein[x,] % 2) and np.any(protein[y,] % 2)):
G_odd_even.add_edge(x, y)
print "ODD-EVEN PIN"
print "~~~~~~~~~~~~~"
print ""
display_graph(G_odd_even)
def is_prime(a):
if a < 3:
return False
return all(a % i for i in xrange(2, a))
G_prime_prime = nx.Graph()
G_prime_prime.add_nodes_from(range(n_protein))
for x in range(n_protein):
for y in range(n_protein):
if not x == y:
x_prime = []
y_prime = []
for z in range(domain_per_protein):
x_prime.append(is_prime(protein[x, z]))
y_prime.append(is_prime(protein[y, z]))
if any(x_prime) and any(y_prime):
G_prime_prime.add_edge(x, y)
print "PRIME PRIME PIN"
print "~~~~~~~~~~~~~~~"
print ""
display_graph(G_prime_prime)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question 1
Step2: Final Coexpression network (images exported from Cytoscape)
Step3: Degree Distribution for cases <br>
Step6: Analysis and comclusion
Step8: Observations
Step9: Observations
Step10: Question 4
|
7,670
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
%matplotlib inline
import numpy as np
import nsfg
import first
import thinkstats2
import thinkplot
preg = nsfg.ReadFemPreg()
live = preg[preg.outcome == 1]
hist = thinkstats2.Hist(live.birthwgt_lb, label='birthwgt_lb')
thinkplot.Hist(hist)
thinkplot.Config(xlabel='Birth weight (pounds)', ylabel='Count')
n = hist.Total()
pmf = hist.Copy()
for x, freq in hist.Items():
hist[x] = freq / n
thinkplot.Hist(pmf)
thinkplot.Config(xlabel='Birth weight (pounds)', ylabel='PMF')
pmf = thinkstats2.Pmf([1, 2, 2, 3, 5])
pmf
pmf.Prob(2)
pmf[2]
pmf.Incr(2, 0.2)
pmf[2]
pmf.Mult(2, 0.5)
pmf[2]
pmf.Total()
pmf.Normalize()
pmf.Total()
pmf = thinkstats2.Pmf(live.prglngth, label='prglngth')
thinkplot.Hist(pmf)
thinkplot.Config(xlabel='Pregnancy length (weeks)', ylabel='Pmf')
thinkplot.Pmf(pmf)
thinkplot.Config(xlabel='Pregnancy length (weeks)', ylabel='Pmf')
live, firsts, others = first.MakeFrames()
first_pmf = thinkstats2.Pmf(firsts.prglngth, label='firsts')
other_pmf = thinkstats2.Pmf(others.prglngth, label='others')
width=0.45
axis = [27, 46, 0, 0.6]
thinkplot.PrePlot(2, cols=2)
thinkplot.Hist(first_pmf, align='right', width=width)
thinkplot.Hist(other_pmf, align='left', width=width)
thinkplot.Config(xlabel='Pregnancy length(weeks)', ylabel='PMF', axis=axis)
thinkplot.PrePlot(2)
thinkplot.SubPlot(2)
thinkplot.Pmfs([first_pmf, other_pmf])
thinkplot.Config(xlabel='Pregnancy length(weeks)', axis=axis)
weeks = range(35, 46)
diffs = []
for week in weeks:
p1 = first_pmf.Prob(week)
p2 = other_pmf.Prob(week)
diff = 100 * (p1 - p2)
diffs.append(diff)
thinkplot.Bar(weeks, diffs)
thinkplot.Config(xlabel='Pregnancy length(weeks)', ylabel='Difference (percentage points)')
d = { 7: 8, 12: 8, 17: 14, 22: 4,
27: 6, 32: 12, 37: 8, 42: 3, 47: 2 }
pmf = thinkstats2.Pmf(d, label='actual')
def BiasPmf(pmf, label):
new_pmf = pmf.Copy(label=label)
for x, p in pmf.Items():
new_pmf.Mult(x, x)
new_pmf.Normalize()
return new_pmf
biased_pmf = BiasPmf(pmf, label='observed')
thinkplot.PrePlot(2)
thinkplot.Pmfs([pmf, biased_pmf])
thinkplot.Config(xlabel='Class size', ylabel='PMF')
print('Actual mean', pmf.Mean())
print('Observed mean', biased_pmf.Mean())
def UnbiasPmf(pmf, label=None):
new_pmf = pmf.Copy(label=label)
for x, p in pmf.Items():
new_pmf[x] *= 1/x
new_pmf.Normalize()
return new_pmf
unbiased = UnbiasPmf(biased_pmf, label='unbiased')
print('Unbiased mean', unbiased.Mean())
thinkplot.PrePlot(2)
thinkplot.Pmfs([pmf, unbiased])
thinkplot.Config(xlabel='Class size', ylabel='PMF')
import numpy as np
import pandas
array = np.random.randn(4, 2)
df = pandas.DataFrame(array)
df
columns = ['A', 'B']
df = pandas.DataFrame(array, columns=columns)
df
index = ['a', 'b', 'c', 'd']
df = pandas.DataFrame(array, columns=columns, index=index)
df
df['A']
df.loc['a']
df.iloc[0]
indices = ['a', 'c']
df.loc[indices]
df['a':'c']
df[0:2]
resp = nsfg.ReadFemResp()
# Solution goes here
pmf = thinkstats2.Pmf(resp.numkdhh, label='actual')
# Solution goes here
print('actual')
thinkplot.pmf(pmf)
thinkplot.Config(xlabel='# of children', ylabel='PMF')
# Solution goes here
# biased numbers
biased = BiasPmf(pmf, label='Biased')
# Solution goes here
thinkplot.PrePlot(2)
thinkplot.Pmfs([pmf, biased])
thinkplot.Config(xlabel='number of children', ylabel='pmf')
# Solution goes here
# mean of pmf
pmf.Mean()
# Solution goes here
# biased mean
biased.Mean()
# Exercise: for women with >= 2 live births, histogram the difference in
# pregnancy length between the first two pregnancies.
live, firsts, others = first.MakeFrames()
preg_map = nsfg.MakePregMap(live)
# Solution goes here
hist = thinkstats2.Hist()
for i,j in preg_map.items():
    # print('i=',i,'j=',j)
    if len(j) >= 2:
        # NOTE(review): `preg` is not defined in this cell; presumably the
        # full pregnancy DataFrame loaded earlier in the notebook -- confirm.
        pair = (preg.loc[j[0:2]].prglngth)
        diff = np.diff(pair)[0]
        hist[diff] += 1
# Solution goes here
thinkplot.Hist(hist)
# Solution goes here
# Relay-race speeds, binned into 100 bins between 3 and 12 mph.
import relay
results = relay.ReadResults()
speeds = relay.GetSpeeds(results)
speeds = relay.BinData(speeds, 3, 12, 100)
pmf = thinkstats2.Pmf(speeds, 'actual speeds')
thinkplot.Pmf(pmf)
thinkplot.Config(xlabel='Speed (mph)', ylabel='PMF')
def Observed_Pmf(pmf, speed, label=None):
    """Bias `pmf` by distance from `speed`.

    Models what a runner moving at `speed` observes: the rate of passing
    (or being passed by) other runners scales with the absolute speed
    difference, so nearby speeds are under-represented.
    """
    observed = pmf.Copy(label=label)
    for value in observed.Values():
        observed[value] *= abs(value - speed)
    observed.Normalize()
    return observed
# PMF of runner speeds as seen by an observer running at 7 mph.
newpmf = Observed_Pmf(pmf, 7, label='observed')
thinkplot.Pmf(newpmf)
thinkplot.Config(xlabel='speed',ylabel='pmf')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Again, I'll load the NSFG pregnancy file and select live births
Step2: Here's the histogram of birth weights
Step3: To normalize the distribution, we could divide through by the total count
Step4: The result is a Probability Mass Function (PMF).
Step5: More directly, we can create a Pmf object.
Step6: Pmf provides Prob, which looks up a value and returns its probability
Step7: The bracket operator does the same thing.
Step8: The Incr method adds to the probability associated with a given values.
Step9: The Mult method multiplies the probability associated with a value.
Step10: Total returns the total probability (which is no longer 1, because we changed one of the probabilities).
Step11: Normalize divides through by the total probability, making it 1 again.
Step12: Here's the PMF of pregnancy length for live births.
Step13: Here's what it looks like plotted with Hist, which makes a bar graph.
Step14: Here's what it looks like plotted with Pmf, which makes a step function.
Step15: We can use MakeFrames to return DataFrames for all live births, first babies, and others.
Step16: Here are the distributions of pregnancy length.
Step17: And here's the code that replicates one of the figures in the chapter.
Step18: Here's the code that generates a plot of the difference in probability (in percentage points) between first babies and others, for each week of pregnancy (showing only pregnancies considered "full term").
Step19: Biasing and unbiasing PMFs
Step20: This function computes the biased PMF we would get if we surveyed students and asked about the size of the classes they are in.
Step21: The following graph shows the difference between the actual and observed distributions.
Step22: The observed mean is substantially higher than the actual.
Step23: If we were only able to collect the biased sample, we could "unbias" it by applying the inverse operation.
Step24: We can unbias the biased PMF
Step25: And plot the two distributions to confirm they are the same.
Step26: Pandas indexing
Step27: We can specify column names when we create the DataFrame
Step28: We can also specify an index that contains labels for the rows.
Step29: Normal indexing selects columns.
Step30: We can use the loc attribute to select rows.
Step31: If you don't want to use the row labels and prefer to access the rows using integer indices, you can use the iloc attribute
Step32: loc can also take a list of labels.
Step33: If you provide a slice of labels, DataFrame uses it to select rows.
Step34: If you provide a slice of integers, DataFrame selects rows by integer index.
Step35: But notice that one method includes the last elements of the slice and one does not.
Step36: Exercise
Step37: Exercise
|
7,671
|
<ASSISTANT_TASK:>
Python Code:
# --- Support Vector Machines demo (scikit-learn) ---
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# use seaborn plotting defaults
import seaborn as sns; sns.set()
# Two well-separated 2-D blobs: a linearly separable toy problem.
from sklearn.datasets.samples_generator import make_blobs
X, y = make_blobs(n_samples=50, centers=2,
                  random_state=0, cluster_std=0.60)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn');
# Several different lines that all separate the data perfectly.
xfit = np.linspace(-1, 3.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn')
plt.plot([0.6], [2.1], 'x', color='red', markeredgewidth=2, markersize=10)
for m, b in [(1, 0.65), (0.5, 1.6), (-0.2, 2.9)]:
    plt.plot(xfit, m * xfit + b, '-k')
plt.xlim(-1, 3.5);
# The same lines drawn with a margin of half-width d around each.
xfit = np.linspace(-1, 3.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn')
for m, b, d in [(1, 0.65, 0.33), (0.5, 1.6, 0.55), (-0.2, 2.9, 0.2)]:
    yfit = m * xfit + b
    plt.plot(xfit, yfit, '-k')
    plt.fill_between(xfit, yfit - d, yfit + d, edgecolor='none',
                     color='#AAAAAA', alpha=0.4)
plt.xlim(-1, 3.5);
# Fit a hard-margin linear SVM (very large C).
from sklearn.svm import SVC  # "Support vector classifier"
model = SVC(kernel='linear', C=1E10)
model.fit(X, y)
def plot_svc_decision_function(model, ax=None, plot_support=True):
    """Plot the decision function for a 2D SVC.

    Draws the decision boundary (level 0) and the two margins (levels +/-1)
    of a fitted two-feature SVC on `ax` (current axes by default), optionally
    circling the support vectors.

    NOTE(review): the original docstring lost its triple quotes in
    conversion, leaving a syntax error; restored here.
    """
    if ax is None:
        ax = plt.gca()
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    # create grid to evaluate model
    x = np.linspace(xlim[0], xlim[1], 30)
    y = np.linspace(ylim[0], ylim[1], 30)
    Y, X = np.meshgrid(y, x)
    xy = np.vstack([X.ravel(), Y.ravel()]).T
    P = model.decision_function(xy).reshape(X.shape)
    # plot decision boundary and margins
    ax.contour(X, Y, P, colors='k',
               levels=[-1, 0, 1], alpha=0.5,
               linestyles=['--', '-', '--'])
    # plot support vectors
    if plot_support:
        ax.scatter(model.support_vectors_[:, 0],
                   model.support_vectors_[:, 1],
                   s=300, linewidth=1, facecolors='none', edgecolor="black")
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
# Show the fitted boundary; the circled points are the support vectors.
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn')
plot_svc_decision_function(model);
# Coordinates of the support vectors.
model.support_vectors_
def plot_svm(N=10, ax=None):
    """Fit a hard-margin linear SVC on the first N blob points and plot it."""
    points, labels = make_blobs(n_samples=400, centers=2,
                                random_state=0, cluster_std=0.60)
    points, labels = points[:N], labels[:N]
    clf = SVC(kernel='linear', C=1E10)
    clf.fit(points, labels)
    if ax is None:
        ax = plt.gca()
    ax.scatter(points[:, 0], points[:, 1], c=labels, s=50, cmap='autumn')
    ax.set_xlim(-1, 4)
    ax.set_ylim(-1, 6)
    plot_svc_decision_function(clf, ax)
# Left/right: boundary fit on 60 vs 120 points (model depends only on the
# support vectors, so it barely changes).
fig, ax = plt.subplots(1, 2, figsize=(16, 6))
fig.subplots_adjust(left=0.0625, right=0.95, wspace=0.1)
for axi, N in zip(ax, [60, 120]):
    plot_svm(N, axi)
    axi.set_title('N = {0}'.format(N))
# Interactive slider version.
from ipywidgets import interact, fixed
interact(plot_svm, N=[10, 400], ax=fixed(None));
# Concentric circles: not linearly separable.
from sklearn.datasets.samples_generator import make_circles
X, y = make_circles(100, factor=.1, noise=.1)
clf = SVC(kernel='linear').fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn')
plot_svc_decision_function(clf, plot_support=False);
# Radial basis feature r = exp(-|x|^2) lifts the data to 3-D.
(X ** 2)[0]
(X ** 2)[0].sum()
(X ** 2).sum(1)[0]
r = np.exp(-(X ** 2).sum(1))
from mpl_toolkits import mplot3d
def plot_3D(elev=30, azim=30, X=X, y=y):
    """3-D scatter of the circles data lifted by the radial feature `r`.

    X/y default to the module-level circles data (bound at definition time).
    """
    axes3d = plt.subplot(projection='3d')
    axes3d.scatter3D(X[:, 0], X[:, 1], r, c=y, s=50, cmap='autumn')
    axes3d.view_init(elev=elev, azim=azim)
    for name, setter in (('x', axes3d.set_xlabel),
                         ('y', axes3d.set_ylabel),
                         ('r', axes3d.set_zlabel)):
        setter(name)
# NOTE(review): 'azip' looks like a typo for 'azim' -- that slider is ignored.
interact(plot_3D, elev=[-90, 90], azip=(-180, 180),
         X=fixed(X), y=fixed(y));
# An RBF kernel learns the circular boundary directly.
clf = SVC(kernel='rbf', C=1E6)
clf.fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn')
plot_svc_decision_function(clf)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
            s=300, lw=1, facecolors='none');
# Overlapping blobs: margin softening via C.
X, y = make_blobs(n_samples=100, centers=2,
                  random_state=0, cluster_std=1)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn');
X, y = make_blobs(n_samples=100, centers=2,
                  random_state=0, cluster_std=1)
fig, ax = plt.subplots(1, 2, figsize=(16, 6))
fig.subplots_adjust(left=0.0625, right=0.95, wspace=0.1)
# Large C = hard margin; small C = soft margin with more support vectors.
for axi, C in zip(ax, [50.0, 0.1]):
    model = SVC(kernel='linear', C=C).fit(X, y)
    axi.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn')
    plot_svc_decision_function(model, axi)
    axi.scatter(model.support_vectors_[:, 0],
                model.support_vectors_[:, 1],
                s=300, lw=1, facecolors='none');
    axi.set_title('C = {0:.1f}'.format(C), size=14)
# --- Face recognition: LFW faces + randomized PCA + RBF SVM ---
from sklearn.datasets import fetch_lfw_people
faces = fetch_lfw_people(min_faces_per_person=57)
print(faces.target_names)
print(faces.images.shape)
fig, ax = plt.subplots(3, 5)
for i, axi in enumerate(ax.flat):
    axi.imshow(faces.images[i], cmap='bone')
    axi.set(xticks=[], yticks=[],
            xlabel=faces.target_names[faces.target[i]])
# NOTE(review): RandomizedPCA and the cross_validation/grid_search modules
# were removed in later scikit-learn releases (use PCA(svd_solver='randomized')
# and sklearn.model_selection) -- this cell assumes an old sklearn.
from sklearn.svm import SVC
from sklearn.decomposition import RandomizedPCA
from sklearn.pipeline import make_pipeline
pca = RandomizedPCA(n_components=150, whiten=True, random_state=42)
svc = SVC(kernel='rbf', class_weight='balanced')
model = make_pipeline(pca, svc)
from sklearn.cross_validation import train_test_split
Xtrain, Xtest, ytrain, ytest = train_test_split(faces.data, faces.target,
                                                random_state=42)
import warnings
warnings.filterwarnings("ignore")
# Grid-search C and gamma of the SVC stage.
from sklearn.grid_search import GridSearchCV
param_grid = {'svc__C': [1, 5, 10, 50],
              'svc__gamma': [0.0001, 0.0005, 0.001, 0.005]}
grid = GridSearchCV(model, param_grid)
%time grid.fit(Xtrain, ytrain)
print(grid.best_params_)
model = grid.best_estimator_
yfit = model.predict(Xtest)
# Show predictions; mislabeled faces in red.
fig, ax = plt.subplots(4, 6)
for i, axi in enumerate(ax.flat):
    axi.imshow(Xtest[i].reshape(62, 47), cmap='bone')
    axi.set(xticks=[], yticks=[])
    axi.set_ylabel(faces.target_names[yfit[i]].split()[-1],
                   color='black' if yfit[i] == ytest[i] else 'red')
fig.suptitle('Predicted Names; Incorrect Labels in Red', size=14);
from sklearn.metrics import classification_report
print(classification_report(ytest, yfit,
                            target_names=faces.target_names))
from sklearn.metrics import confusion_matrix
mat = confusion_matrix(ytest, yfit)
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
            xticklabels=faces.target_names,
            yticklabels=faces.target_names)
plt.xlabel('true label')
plt.ylabel('predicted label');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Motivating Support Vector Machines
Step2: A linear discriminative classifier would attempt to draw a straight line separating the two sets of data, and thereby create a model for classification.
Step3: These are three very different separators which, nevertheless, perfectly discriminate between these samples.
Step4: The line that maximizes this margin
Step6: To better visualize what's happening here, let's create a quick convenience function that will plot SVM decision boundaries for us
Step7: This is the dividing line that maximizes the margin between the two sets of points.
Step8: A key to this classifier's success is that for the fit, only the position of the support vectors matter;
Step9: In the right panel, we have doubled the number of training points, but the model has not changed
Step10: Beyond linear boundaries
Step11: No linear discrimination will ever be able to separate this data.
Step12: We can visualize this extra data dimension using a three-dimensional plot
Step13: We can see that with this additional dimension,
Step14: Using this kernelized support vector machine, we learn a suitable nonlinear decision boundary.
Step15: The SVM implementation has a bit of a fudge-factor which "softens" the margin
Step16: The optimal value of the $C$ parameter will depend on your dataset, and should be tuned using cross-validation or a similar procedure (refer back to Hyperparameters and Model Validation).
Step17: Let's plot a few of these faces to see what we're working with
Step18: Each image contains [62×47] or nearly 3,000 pixels.
Step19: For the sake of testing our classifier output, we will split the data into a training and testing set
Step20: Finally, we can use a grid search cross-validation to explore combinations of parameters.
Step21: The optimal values fall toward the middle of our grid; if they fell at the edges, we would want to expand the grid to make sure we have found the true optimum.
Step22: Let's take a look at a few of the test images along with their predicted values
Step23: We can get a better sense of our estimator's performance using the classification report,
Step24: We might also display the confusion matrix between these classes
|
7,672
|
<ASSISTANT_TASK:>
Python Code:
# --- Export a scikit-learn model to SQL via the sklearn2sql web service ---
from sklearn import datasets
digits = datasets.load_digits()
X = digits.data
n_classes = len(digits.target_names)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=512, max_depth=7, min_samples_leaf=30, random_state = 1960)
clf.fit(digits.data, digits.target)
#clf.__dict__
def test_ws_sql_gen(pickle_data):
    """POST a pickled model to the local sklearn2sql service and return the
    generated PostgreSQL query string."""
    endpoint = "http://localhost:1888/model"
    payload = {
        "Name": "model1",
        "PickleData": base64.b64encode(pickle_data).decode('utf-8'),
        "SQLDialect": "postgresql",
    }
    response = requests.post(endpoint, json=payload)
    # NOTE: the service spells this key "SQLGenrationResult" (sic).
    return response.json()["model"]["SQLGenrationResult"][0]["SQL"]
# Generate SQL for the fitted forest (pickle/sa/pd presumably imported
# earlier in the notebook -- confirm).
pickle_data = pickle.dumps(clf)
lSQL = test_ws_sql_gen(pickle_data)
# Print the head, middle and tail of the (very long) generated SQL.
N = len(lSQL)
P = 4000
print(lSQL[0:P] + "..." + lSQL[N//2:(N//2 + P)] + "..." + lSQL[-P:])
# save the dataset in a database table
engine = sa.create_engine('postgresql://db:db@localhost/db?port=5432' , echo=False)
conn = engine.connect()
lTable = pd.DataFrame(digits.data);
lTable.columns = ['Feature_' + str(c) for c in range(digits.data.shape[1])]
lTable['KEY'] = range(lTable.shape[0])
lTable.to_sql("INPUT_DATA" , conn, if_exists='replace', index=False)
# Execute the generated SQL against the table and order rows by KEY.
sql_output = pd.read_sql(lSQL , conn);
sql_output = sql_output.sort_values(by='KEY').reset_index(drop=True)
conn.close()
sql_output.sample(12, random_state=1960)
sql_output.Decision.value_counts()
# Recompute the same outputs in-process with scikit-learn for comparison.
skl_outputs = pd.DataFrame()
skl_output_key = pd.DataFrame(list(range(X.shape[0])), columns=['KEY']);
skl_output_score = pd.DataFrame(columns=['Score_' + str(c) for c in range(n_classes)]);
skl_output_proba = pd.DataFrame(clf.predict_proba(X), columns=['Proba_' + str(c) for c in range(n_classes)])
skl_output_log_proba = pd.DataFrame(clf.predict_log_proba(X), columns=['LogProba_' + str(c) for c in range(n_classes)])
skl_output_decision = pd.DataFrame(clf.predict(X), columns=['Decision'])
skl_output = pd.concat([skl_output_key, skl_output_score, skl_output_proba, skl_output_log_proba, skl_output_decision] , axis=1)
skl_output.sample(12, random_state=1960)
# Join SQL vs sklearn predictions and list any disagreeing rows.
sql_skl_join = skl_output.join(sql_output , how='left', on='KEY', lsuffix='_skl', rsuffix='_sql')
sql_skl_join.sample(12, random_state=1960)
condition = (sql_skl_join.Decision_sql != sql_skl_join.Decision_skl)
sql_skl_join[condition]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate SQL Code from the Model
Step2: Execute the SQL Code
Step3: Scikit-learn Prediction
Step4: Comparing the SQL and Scikit-learn Predictions
|
7,673
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import sys
from corner import corner
sys.path.append('.')
from src.mocklc import MockLC, SimulationSetup
from src.blendlpf import MockLPF
import src.plotting as pl
lc = MockLC(SimulationSetup('M', 0.1, 0.0, 0.15, 'short_transit', cteff=5500, know_orbit=False))
lc.create(wnsigma=[0.001, 0.001, 0.001, 0.001], rnsigma=0.00001, rntscale=0.5, nights=1);
lc.plot();
lpf = MockLPF('Example_1', lc)
lpf.print_parameters(columns=2)
lpf.optimize_global(1000)
lpf.plot_light_curves()
lpf.sample_mcmc(5000, reset=True, repeats=2)
df = lpf.posterior_samples()
pl.joint_radius_ratio_plot(df, fw=13, clim=(0.099, 0.12), htelim=(3570, 3630), ctelim=(4000,7500), blim=(0, 0.5), rlim=(3.8, 5.2));
pl.joint_contamination_plot(df, fw=13, clim=(0, 0.4), htelim=(3570, 3630), ctelim=(4000,7500), blim=(0, 0.5), rlim=(3.8, 5.2));
pl.marginal_radius_ratio_plot(df, bins=60, klim=(0.097, 0.12), figsize=(7,5));
corner(df.iloc[:,2:-3]);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a mock light curve
Step2: Initialize the log posterior function
Step3: Optimize
Step4: Estimate the posterior
Step5: Analysis
Step6: Plot the apparent and true radius ratio posteriors
Step7: Make a corner plot to have a good overview to the posterior space
|
7,674
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from opsimsummary import OpSimOutput
%matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
# Load the full OpSim (minion_1016) observation summary.
opsout = OpSimOutput.fromOpSimDB('/Users/rbiswas/data/LSST/OpSimData/minion_1016_sqlite.db',
                                 subset='_all')
opsout.summary.expMJD.size
opsout.summary.reset_index().drop_duplicates(subset='obsHistID').expMJD.size
# Declination extent of the WFD proposal (propID 54), in radians.
maxdec = opsout.summary.query('propID == 54').fieldDec.max()
mindec = opsout.summary.query('propID == 54').fieldDec.min()
# Convert declinations to colatitudes and compute the band area on the sphere.
maxtheta = - mindec + np.pi /2.0
mintheta = - maxdec + np.pi / 2.0
area = 2 * np.pi * (np.cos(mintheta) - np.cos(maxtheta))
# Check that the sky fraction is ~ 0.5
area / 4 / np.pi
# Declination boundaries b1dec/b2dec splitting the band into 3 equal-area patches.
b1 = np.arccos((2 * np.pi * np.cos(mintheta) - area /3)/ 2 /np.pi)
b1dec = - b1 + np.pi/2
np.degrees(b1dec)
b2 = np.arccos(( area /3 + 2 * np.pi * np.cos(maxtheta) )/ 2 /np.pi)
b2dec = -b2 + np.pi/2.
np.degrees(b2dec)
def numFieldsinPatch(mindecval, maxdecval, patchName=None, propID=54):
    """Count the observed fields inside a declination band.

    `mindecval`/`maxdecval` may be '@name' references, which
    DataFrame.query resolves against the surrounding (notebook-global)
    scope.  Returns the filtered observations, the number of unique
    fields, and the number of those belonging to proposal `propID`
    (WFD by default).
    """
    if patchName is None:
        patchName = ''
    selector = 'fieldDec > {0} and fieldDec < {1}'.format(mindecval, maxdecval)
    print (selector)
    band_obs = opsout.summary.query(selector)
    n_total = len(band_obs.reset_index().drop_duplicates(subset='obsHistID').fieldID.unique())
    n_wfd = len(band_obs.query('propID == {}'.format(propID)).fieldID.unique())
    print('Total Number of fields in {2} patch is {0} and {1} are in WFD'.format(n_total,
                                                                                 n_wfd,
                                                                                 patchName))
    return band_obs, n_total, n_wfd
# North / middle / south equal-area patches of the WFD band.
dfNorthPatch, numFieldsNorth, numFieldsNorthWFD = numFieldsinPatch('@b1dec', '@maxdec', patchName='North')
dfMiddlePatch, numFieldsMiddle, numFieldsMiddleWFD = numFieldsinPatch('@b2dec',
                                                                      '@b1dec', patchName='Middle')
dfSouthPatch, numFieldsSouth, numFieldsSouthWFD = numFieldsinPatch('@mindec',
                                                                   '@b2dec', patchName='South')
# Persist the WFD field IDs of each patch.
NorthFieldIDs = dfNorthPatch.query('propID == 54').fieldID
MiddleFielIDs = dfMiddlePatch.query('propID == 54').fieldID
SouthFielIDs = dfSouthPatch.query('propID == 54').fieldID
NorthFieldIDs.to_csv('NorthFieldIDs.csv', header=True, index=False)
MiddleFielIDs.to_csv('MiddleFieldIDs.csv', header=True, index=False)
SouthFielIDs.to_csv('SouthFieldIDs.csv', header=True, index=False)
# Mollweide sky map of the three declination bands.
fig, ax = plt.subplots()
m = Basemap(#llcrnrlat=-32., llcrnrlon=48.,
            #urcrnrlat=-22., urcrnrlon=58,
            projection='moll', lon_0=0., lat_0=0., ax=ax)
xN, yN = m(dfNorthPatch.fieldRA.apply(np.degrees).values,
           dfNorthPatch.fieldDec.apply(np.degrees).values)
xM, yM = m(dfMiddlePatch.fieldRA.apply(np.degrees).values,
           dfMiddlePatch.fieldDec.apply(np.degrees).values)
xS, yS = m(dfSouthPatch.fieldRA.apply(np.degrees).values,
           dfSouthPatch.fieldDec.apply(np.degrees).values)
m.scatter(xN, yN, marker='.', color='k')
m.scatter(xM, yM, marker='.', color='r')
m.scatter(xS, yS, marker='.', color='b')
fig.savefig('DecBands.png')
# Sanity check: the three patches share no fields.
NF = set(NorthFieldIDs.values.tolist())
MF = set(MiddleFielIDs.values.tolist())
SF = set(SouthFielIDs.values.tolist())
print(NF.intersection(MF))
print(NF.intersection(SF))
print(SF.intersection(MF))
# WFD fractions per patch and relative patch sizes.
# NOTE(review): np.float was removed in NumPy 1.24; plain float() works here.
numFieldsNorthWFD/np.float(numFieldsNorth), numFieldsMiddleWFD/np.float(numFieldsMiddle), numFieldsSouthWFD/np.float(numFieldsSouth),
numFieldsMiddleWFD / np.float(numFieldsNorthWFD) , numFieldsSouthWFD/np.float(numFieldsNorthWFD)
numFieldsMiddle / np.float(numFieldsNorth) , numFieldsSouth/np.float(numFieldsNorth)
# Same counts for proposal 52.
x, nn1, nn2 = numFieldsinPatch('@b1dec', '@maxdec', patchName='North', propID=52)
x, nm1, nm2 = numFieldsinPatch('@b2dec', '@b1dec', patchName='Middle', propID=52)
x, ns1, ns2 = numFieldsinPatch('@mindec','@b2dec', patchName='South', propID=52)
# Visit counts after year 1: WFD vs everything else.
dfall = opsout.summary.query('night > 365')
wfd = dfall.query('propID == 54')
numWFD = len(wfd)
numOther = len(dfall) - len(wfd)
numOther/ np.float(numWFD)
np.float(numWFD) / np.float(len(dfall))
np.radians(1.75)
opsout.summary.columns
# Unique WFD fields with coordinates in degrees; shuffle and split into 3 groups.
FieldIDs = opsout.summary.query('propID == 54').drop_duplicates(subset='fieldID')[['fieldID', 'fieldRA', 'fieldDec']]
FieldIDs[['fieldRA', 'fieldDec']] = FieldIDs[['fieldRA', 'fieldDec']].apply(np.degrees)
FieldIDs['width'] = 0.03
rng = np.random.RandomState(0)
FieldIDs = FieldIDs.sample(frac=1, replace=False, random_state=rng)
groups = np.array_split(FieldIDs, 3)
groups[0].fieldID.size
# NOTE(review): stray unary minus -- looks like a truncated comparison
# against FieldIDs.fieldID.size; the expression is a harmless no-op here.
- FieldIDs.fieldID.size
# Map of the three random field groups.
fig, ax = plt.subplots()
m = Basemap(#llcrnrlat=-32., llcrnrlon=48.,
            #urcrnrlat=-22., urcrnrlon=58,
            projection='moll', lon_0=0., lat_0=0., ax=ax)
xN, yN = m(groups[0].fieldRA.values,
           groups[0].fieldDec.values)
xM, yM = m(groups[1].fieldRA.values,
           groups[1].fieldDec.values)
xS, yS = m(groups[2].fieldRA.values,
           groups[2].fieldDec.values)
m.scatter(xN, yN, marker='.', color='k')
m.scatter(xM, yM, marker='.', color='b')
m.scatter(xS, yS, marker='.', color='r')
fig.savefig('random_fields.png')
groups[0].to_csv('Group_0.csv')
groups[1].to_csv('Group_1.csv')
groups[2].to_csv('Group_2.csv')
dfall.propID.unique()
opsout.propIDDict
opsout.summary.query('propID == 53').night.max()
# Per-proposal histograms of visits per night over the 10-year survey.
fig, ax = plt.subplots()
opsout.summary.query('propID == 55').night.hist(histtype='step',
                                                bins=np.arange(0., 3650., 10.),
                                                lw=2., ax=ax)
opsout.summary.query('propID == 52').night.hist(histtype='step',
                                                bins=np.arange(0., 3650., 10.),
                                                lw=2.,ax =ax)
opsout.summary.query('propID == 53').night.hist(histtype='step',
                                                bins=np.arange(0., 3650., 10.),
                                                lw=2.,ax=ax)
opsout.summary.query('propID == 54').night.hist(histtype='step',
                                                bins=np.arange(0., 3650., 10.),
                                                lw=2., ax=ax)
opsout.summary.query('propID == 56').night.hist(histtype='step',
                                                bins=np.arange(0., 3650., 10.),
                                                lw=2., ax=ax)
# Slew-time distributions: WFD vs proposal 52.
fig, ax = plt.subplots()
wfd.slewTime.hist(histtype='step', color='k', lw=2., alpha=1., ax=ax, bins=np.arange(0., 160., 5.), normed=1)
dfall.query('propID == 52').slewTime.hist(histtype='step', color='r',
                                          ls='dashed', lw=2., ax=ax,
                                          bins=np.arange(0., 160., 5.), normed=1)
np.pi * 8.4 * 1000 /2.
opsout.summary.plot(x='slewDist', y='slewTime', kind='scatter')
import seaborn as sns
sns.set_style('whitegrid')
opsout.summary.query('slewTime == 0.')
np.degrees(-1.082474)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Declination Boundaries for the WFD proposal
Step2: Total area covered, should be ~ 0.5 sky
Step3: Equations for the boundary values
Step4: Plots
Step5: Checks
Step6: Quick approximation
Step7: So
Step8: We will ignore the smaller number of fields in the Southern bands for now
Step9: So, I suppose a quick approximaton will be to do only the WFD survey during the last three
Step10: Check
Step11: Scratch
|
7,675
|
<ASSISTANT_TASK:>
Python Code:
!pip install wikipedia
import wikipedia
# Fetch the article and count its most common words with nltk.
bky = wikipedia.page("Berkeley, California")
bky
bk_split = bky.content.split()
bk_split[:10]
!pip install nltk
import nltk
fdist1 = nltk.FreqDist(bk_split)
fdist1.most_common(10)
print(bky.references[:10])
print(bky.links[:10])
# Minimal pywikibot user-config.py written to the working directory.
# NOTE(review): the triple quotes around this string were lost in
# conversion, leaving a syntax error; restored here.
user_config = """
family = 'wikipedia'
mylang = 'en'
usernames['wikipedia']['en'] = u'REPLACE THIS WITH YOUR USERNAME'
"""
f = open('user-config.py', 'w')
f.write(user_config)
f.close()
!pip install pywikibot
import pywikibot
site = pywikibot.Site()
bky_page = pywikibot.Page(site, "Berkeley, California")
bky_page
# page text with all the wikimarkup and templates
bky_page_text = bky_page.text
# page text expanded to HTML
bky_page.expand_text()
# All the geographical coordinates linked in a page (may have multiple per article)
bky_page.coordinates()
# Iterate a category's members and collect coordinates for non-category pages.
from pywikibot import pagegenerators
cat = pywikibot.Category(site,'Category:Cities in Alameda County, California')
gen = cat.members()
gen
# create an empty list
coord_d = []
for page in gen:
    print(page.title(), page.coordinates())
    pc = page.coordinates()
    for coord in pc:
        # If the page is not a category
        if(page.isCategory()==False):
            coord_d.append({'label':page.title(), 'latitude':coord.lat, 'longitude':coord.lon})
coord_d[:3]
import pandas as pd
coord_df = pd.DataFrame(coord_d)
coord_df
bay_cat = pywikibot.Category(site,'Category:Universities and colleges in California')
bay_gen = bay_cat.members()
for page in bay_gen:
    print(page.title(), page.isCategory(), page.coordinates())
# Pages linking to Telegraph Avenue; namespace 1 = Talk pages.
telegraph_page = pywikibot.Page(site, u"Telegraph Avenue")
telegraph_backlinks = telegraph_page.backlinks
telegraph_backlinks()
for bl_page in telegraph_backlinks():
    if(bl_page.namespace()==1):
        print(bl_page.title())
telegraph_page.contributors()
# Extract the settlement infobox template and read a few parameters.
bky_page = pywikibot.Page(site, "Berkeley, California")
bky_page.text
from pywikibot import textlib
import pandas as pd
bky_templates = textlib.extract_templates_and_params_regex(bky_page.text)
bky_templates[:5]
for template in bky_templates:
    if(template[0]=="Infobox settlement"):
        infobox = template[1]
infobox.keys()
print(infobox['elevation_ft'])
print(infobox['area_total_sq_mi'])
print(infobox['utc_offset_DST'])
print(infobox['population_total'])
print(infobox['government_type'])
print(infobox['website'])
# Elevation/population/area for every city page in the Bay Area category.
bay_cat = pywikibot.Category(site,'Category:Cities_in_the_San_Francisco_Bay_Area')
bay_gen = bay_cat.members()
for page in bay_gen:
    # If the page is not a category
    if(page.isCategory()==False):
        print(page.title())
        page_templates = textlib.extract_templates_and_params_regex(page.text)
        for template in page_templates:
            if(template[0]=="Infobox settlement"):
                infobox = template[1]
                if 'elevation_ft' in infobox:
                    print(" Elevation (ft): ", infobox['elevation_ft'])
                if 'population_total' in infobox:
                    print(" Population: ", infobox['population_total'])
                if 'area_total_sq_mi' in infobox:
                    print(" Area (sq mi): ", infobox['area_total_sq_mi'])
# Nuclear power stations: the top category nests per-state subcategories,
# so category members are descended one extra level.
power_cat = pywikibot.Category(site,'Category:Nuclear power stations in the United States by state')
power_gen = power_cat.members()
for page in power_gen:
    print(page.title())
    # If the page is not a category
    if(page.isCategory()==False):
        print("\n",page.title(),"\n")
        page_templates = textlib.extract_templates_and_params_regex(page.text)
        for template in page_templates:
            if(template[0]=="Infobox power station"):
                infobox = template[1]
                if 'ps_units_operational' in infobox:
                    print(" Units operational:", infobox['ps_units_operational'])
                if 'owner' in infobox:
                    print(" Owner:", infobox['owner'])
    else:
        for subpage in pywikibot.Category(site,page.title()).members():
            print("\n",subpage.title())
            subpage_templates = textlib.extract_templates_and_params_regex(subpage.text)
            for template in subpage_templates:
                if(template[0]=="Infobox power station"):
                    infobox = template[1]
                    if 'ps_units_operational' in infobox:
                        print(" Units operational:", infobox['ps_units_operational'])
                    if 'owner' in infobox:
                        print(" Owner:", infobox['owner'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this example, we will get the page for Berkeley, California and count the most commonly used words in the article. I'm using nltk, which is a nice library for natural language processing (although it is probably overkill for this).
Step2: There are many functions in a Wikipedia page object. We can also get all the Wikipedia articles that are linked from a page, all the URL links in the page, or all the geographical coordinates in the page.
Step4: Querying using pywikibot
Step5: Generators
Step6: Subcategories
Step7: Other interesting information from pages
Step8: Who has contributed to a page, and how many times have they edited?
Step9: Templates are all the extensions to wikimarkup that give you things like citations, tables, infoboxes, etc. You can iterate over all the templates in a page.
Step10: If you go to the raw text on the Wikipedia (by clicking the edit button), you can see that this is a little bit more ordered
Step11: We iterate through all the templates on the page until we find the one containing the "Infobox settlement" template.
Step12: However, sometimes parameters contain templates, such as citations or references.
Step13: Putting it all together
Step14: This is a script for Katy, getting data about U.S. Nuclear power plants. Wikipedia articles on nuclear power plants have many subcategories
|
7,676
|
<ASSISTANT_TASK:>
Python Code:
!pip3 install 'tfx==0.15.0' --upgrade
!python3 -m pip install 'kfp>=0.1.35' --quiet
# Directory and data locations (uses Google Cloud Storage).
import os
_input_bucket = '<your gcs bucket>'
_output_bucket = '<your gcs bucket>'
_pipeline_root = os.path.join(_output_bucket, 'tfx')
# Google Cloud Platform project id to use when deploying this pipeline.
_project_id = '<your project id>'
# copy the trainer code to a storage bucket as the TFX pipeline will need that code file in GCS
from tensorflow.compat.v1 import gfile
gfile.Copy('examples/penguin/penguin_utils_cloud_tuner.py', _input_bucket + '/penguin_utils_cloud_tuner.py')
%load https://raw.githubusercontent.com/tensorflow/tfx/master/tfx/examples/penguin/penguin_pipeline_kubeflow.py
# Compile the example pipeline and submit it to Kubeflow Pipelines.
import kfp
run_result = kfp.Client(
    host=None  # replace with Kubeflow Pipelines endpoint if this notebook is run outside of the Kubeflow cluster.
).create_run_from_pipeline_package('penguin_kubeflow.tar.gz', arguments={})
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Enable DataFlow API for your GKE cluster
Step2: Configure the TFX pipeline example
Step3: Submit pipeline for execution on the Kubeflow cluster
|
7,677
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import os
import urllib
import gzip
import struct
def download_data(url, force_download=True):
    """Download `url` into the working directory and return the local file
    name.  Skips the download only when `force_download` is False and the
    file already exists.  (Python 2: uses urllib.urlretrieve.)
    """
    local_name = url.rsplit("/", 1)[-1]
    if not force_download and os.path.exists(local_name):
        return local_name
    urllib.urlretrieve(url, local_name)
    return local_name
def read_data(label_url, image_url):
    """Download and parse an MNIST idx label/image file pair.

    Returns (labels, images): labels as an int8 vector of length N, images
    as an (N, rows, cols) uint8 array, with N taken from the label file.
    """
    with gzip.open(download_data(label_url)) as flbl:
        magic, num = struct.unpack(">II", flbl.read(8))
        # frombuffer replaces the deprecated np.fromstring (same layout,
        # but the resulting array is read-only).
        label = np.frombuffer(flbl.read(), dtype=np.int8)
    with gzip.open(download_data(image_url), 'rb') as fimg:
        magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
        image = np.frombuffer(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
    return (label, image)
# Download train/test splits from Yann LeCun's MNIST page.
path='http://yann.lecun.com/exdb/mnist/'
(train_lbl, train_img) = read_data(
    path+'train-labels-idx1-ubyte.gz', path+'train-images-idx3-ubyte.gz')
(val_lbl, val_img) = read_data(
    path+'t10k-labels-idx1-ubyte.gz', path+'t10k-images-idx3-ubyte.gz')
%matplotlib inline
import matplotlib.pyplot as plt
# Show the first ten digits with their labels.
for i in range(10):
    plt.subplot(1,10,i+1)
    plt.imshow(train_img[i], cmap='Greys_r')
    plt.axis('off')
plt.show()
print('label: %s' % (train_lbl[0:10],))
import mxnet as mx
def to4d(img):
    """Reshape (N, 28, 28) images to NCHW (N, 1, 28, 28) floats in [0, 1]."""
    scaled = img.astype(np.float32) / 255
    return scaled.reshape(scaled.shape[0], 1, 28, 28)
# Data iterators: shuffled training batches, sequential validation batches.
batch_size = 100
train_iter = mx.io.NDArrayIter(to4d(train_img), train_lbl, batch_size, shuffle=True)
val_iter = mx.io.NDArrayIter(to4d(val_img), val_lbl, batch_size)
# --- Multi-layer perceptron: 784 -> 128 -> 64 -> 10 ---
# Create a place holder variable for the input data
data = mx.sym.Variable('data')
# Flatten the data from 4-D shape (batch_size, num_channel, width, height)
# into 2-D (batch_size, num_channel*width*height)
data = mx.sym.Flatten(data=data)
# The first fully-connected layer
fc1 = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=128)
# Apply relu to the output of the first fully-connected layer
act1 = mx.sym.Activation(data=fc1, name='relu1', act_type="relu")
# The second fully-connected layer and the according activation function
fc2 = mx.sym.FullyConnected(data=act1, name='fc2', num_hidden = 64)
act2 = mx.sym.Activation(data=fc2, name='relu2', act_type="relu")
# The third fully-connected layer; the hidden size is 10, the number of digits
fc3 = mx.sym.FullyConnected(data=act2, name='fc3', num_hidden=10)
# The softmax and loss layer
mlp = mx.sym.SoftmaxOutput(data=fc3, name='softmax')
# We visualize the network structure with output size (the batch_size is ignored.)
shape = {"data" : (batch_size, 1, 28, 28)}
mx.viz.plot_network(symbol=mlp, shape=shape)
# @@@ AUTOTEST_OUTPUT_IGNORED_CELL
import logging
logging.getLogger().setLevel(logging.DEBUG)
# Train the MLP with plain SGD for 10 epochs.
model = mx.model.FeedForward(
    symbol = mlp,       # network structure
    num_epoch = 10,     # number of data passes for training
    learning_rate = 0.1 # learning rate of SGD
)
model.fit(
    X=train_iter,       # training data
    eval_data=val_iter, # validation data
    batch_end_callback = mx.callback.Speedometer(batch_size, 200) # output progress for each 200 data batches
)
# @@@ AUTOTEST_OUTPUT_IGNORED_CELL
plt.imshow(val_img[0], cmap='Greys_r')
plt.axis('off')
plt.show()
prob = model.predict(val_img[0:1].astype(np.float32)/255)[0]
assert max(prob) > 0.99, "Low prediction accuracy."
print 'Classified as %d with probability %f' % (prob.argmax(), max(prob))
# @@@ AUTOTEST_OUTPUT_IGNORED_CELL
valid_acc = model.score(val_iter)
print 'Validation accuracy: %f%%' % (valid_acc *100,)
assert valid_acc > 0.95, "Low validation accuracy."
from IPython.display import HTML
import cv2
import numpy as np
def classify(img):
    """Classify a base64-encoded PNG drawing as a digit (0-9).

    Expects a data-URL string ("data:image/png;base64,..."): decodes the
    payload, extracts the alpha channel, resizes to the 28x28 MNIST input
    size and runs the trained model.

    NOTE(review): relies on the module-level `model`; assumes the PNG has
    an alpha channel (channel index 3) -- confirm with the demo canvas.
    """
    import base64
    # str.decode('base64') is Python 2 only; base64.b64decode is the
    # portable equivalent.
    raw = base64.b64decode(img[len('data:image/png;base64,'):])
    # np.fromstring is deprecated for binary input; np.frombuffer is the
    # supported (zero-copy) replacement.
    decoded = cv2.imdecode(np.frombuffer(raw, np.uint8), -1)
    resized = cv2.resize(decoded[:, :, 3], (28, 28))
    batch = resized.astype(np.float32).reshape((1, 1, 28, 28)) / 255.0
    return model.predict(batch)[0].argmax()
'''
To see the model in action, run the demo notebook at
https://github.com/dmlc/mxnet-notebooks/blob/master/python/tutorials/mnist.ipynb.
'''
HTML(filename="mnist_demo.html")
data = mx.symbol.Variable('data')
# first conv layer
conv1 = mx.sym.Convolution(data=data, kernel=(5,5), num_filter=20)
tanh1 = mx.sym.Activation(data=conv1, act_type="tanh")
pool1 = mx.sym.Pooling(data=tanh1, pool_type="max", kernel=(2,2), stride=(2,2))
# second conv layer
conv2 = mx.sym.Convolution(data=pool1, kernel=(5,5), num_filter=50)
tanh2 = mx.sym.Activation(data=conv2, act_type="tanh")
pool2 = mx.sym.Pooling(data=tanh2, pool_type="max", kernel=(2,2), stride=(2,2))
# first fullc layer
flatten = mx.sym.Flatten(data=pool2)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)
tanh3 = mx.sym.Activation(data=fc1, act_type="tanh")
# second fullc
fc2 = mx.sym.FullyConnected(data=tanh3, num_hidden=10)
# softmax loss
lenet = mx.sym.SoftmaxOutput(data=fc2, name='softmax')
mx.viz.plot_network(symbol=lenet, shape=shape)
# @@@ AUTOTEST_OUTPUT_IGNORED_CELL
model = mx.model.FeedForward(
ctx = mx.gpu(0), # use GPU 0 for training, others are same as before
symbol = lenet,
num_epoch = 10,
learning_rate = 0.1)
model.fit(
X=train_iter,
eval_data=val_iter,
batch_end_callback = mx.callback.Speedometer(batch_size, 200)
)
assert model.score(val_iter) > 0.98, "Low validation accuracy."
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We plot the first 10 images and print their labels.
Step2: Next we create data iterators for MXNet. The data iterator, which is similar to a Python iterator, returns a batch of data in each next() call. A batch contains several images with their corresponding labels. These images are stored in a 4-D matrix with shape (batch_size, num_channels, width, height). For the MNIST dataset, there is only one color channel, and both width and height are 28. In addition, we often shuffle the images used for training, which accelerates the training progress.
Step3: Multilayer Perceptron
Step4: Now both the network definition and data iterators are ready. We can start training.
Step5: After training is done, we can predict a single image.
Step6: We can also evaluate the accuracy given a data iterator.
Step7: Furthermore, we can recognize the digit written in the box below.
Step8: Convolutional Neural Networks
Step9: Note that LeNet is more complex than the previous multilayer perceptron, so we use GPU instead of CPU for training.
|
7,678
|
<ASSISTANT_TASK:>
Python Code:
!date
import numpy as np
import torch
import math
import matplotlib.mlab as mlab
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.autograd import Variable
import scipy.stats as stats
import scipy
import gc
from sklearn.utils import check_random_state
seed = 1337
random_number_generator = check_random_state(seed)
batch_size = 512
num_features = 1 # cos(A)
num_epochs = 300
gp_lambda = 5.0
beam_energy = 45.0 # In GeV
fermi_constant = 0.9
theta_true = np.array([beam_energy, fermi_constant])
def plot_observations(X_observed, theta_optimal, normed=True):
    """Plot a histogram of observed cos(A) values for one parameter point.

    Args:
        X_observed: array of cos(A) samples in [-1, 1].
        theta_optimal: (beam_energy, fermi_constant) pair used in the title.
        normed: if True, draw a density-normalized histogram.
    """
    plt.grid(True)
    # `normed=` was removed from plt.hist in Matplotlib 3.x; `density=`
    # is the supported equivalent.
    plt.hist(X_observed, histtype="bar", range=(-1, 1), bins=100, density=normed)
    plt.xlim([-1, 1])
    if normed:
        plt.ylim([0, 2])
        plt.ylabel("Normalized Number of Events")
    else:
        plt.ylim([0, 3000])
        plt.ylabel("Number of Events")
    plt.title(r"Distribution of $\cos(A)$ in $e^-e^+ \rightarrow \mu^-\mu^+$ events." + "\n"
              r"$E_{beam}}$ = " + str(theta_optimal[0]) + "GeV - " + r"$G_f$ = " + str(theta_optimal[1]))
    plt.xlabel(r"$\cos(A)$")
    plt.show()
def random_gaussian(mu=[-1, 1], sigma=5.0):
    """Draw a random Gaussian parameterization.

    The mean is sampled uniformly from [mu[0], mu[1]] and the standard
    deviation uniformly from [0, sigma].

    Returns:
        dict with keys 'mu' and 'sigma'.
    """
    mean = np.random.uniform(mu[0], mu[1])
    scale = np.random.uniform(0.0, sigma)
    return {'mu': mean, 'sigma': scale}
def draw_gaussian(d, num_samples, random_state=None):
    """Sample parameter vectors theta_j ~ N(mu_j, sigma_j) per dimension.

    Args:
        d: dict with 'mu' and 'sigma' sequences (one entry per parameter).
        num_samples: number of theta vectors to draw.
        random_state: None, int seed, or np.random.RandomState.

    Returns:
        (num_samples, num_parameters) array of samples.
    """
    # Bug fix: the original ignored `random_state` and always drew from
    # the module-level `random_number_generator`, making the seed argument
    # a silent no-op.
    if not isinstance(random_state, np.random.RandomState):
        random_state = np.random.RandomState(random_state)
    num_parameters = len(d['mu'])
    thetas = np.zeros((num_samples, num_parameters))
    for i in range(num_parameters):
        # Sharing one generator across columns keeps the draws independent.
        thetas[:, i] = stats.norm.rvs(size=num_samples,
                                      loc=d['mu'][i],
                                      scale=d['sigma'][i],
                                      random_state=random_state)
    return thetas
def simulator(theta, n_samples, random_state=None):
    """Generate `n_samples` cos(A) observations for parameters `theta`.

    `theta` is (beam_energy, fermi_constant); samples are produced by
    rejection sampling against the differential cross section.

    Returns:
        (n_samples, 1) column vector of cos(A) values.
    """
    # check_random_state (sklearn) accepts None / int / RandomState.
    rng = check_random_state(random_state)
    samples = rej_sample_costheta(n_samples, theta, rng)
    return samples.reshape(-1, 1)
def rej_sample_costheta(n_samples, theta, rng):
    """Rejection-sample `n_samples` cos(theta) values from the differential
    cross section parameterized by theta = (sqrt(s)/2, G_f).

    Args:
        n_samples: number of accepted samples to return.
        theta: (beam half-energy, Fermi constant) pair.
        rng: numpy RandomState used for proposals.

    Returns:
        1-D numpy array of accepted cos(theta) values in [-1, 1].
    """
    sqrtshalf = theta[0]
    gf = theta[1]
    samples = []
    # Envelope: the maximum of the density on a fine grid over [-1, 1].
    x = np.linspace(-1, 1, num=1000)
    maxval = np.max(diffxsec(x, sqrtshalf, gf))
    while len(samples) < n_samples:
        xprop = rng.uniform(-1, 1)
        ycut = rng.rand()
        # Bug fix: the original divided by `maxval` twice (once here and
        # again in the accept test), which distorted the accepted density.
        yprop = diffxsec(xprop, sqrtshalf, gf) / maxval
        if yprop < ycut:
            continue
        samples.append(xprop)
    return np.array(samples)
def diffxsec(costheta, sqrtshalf, gf):
    """Normalized differential cross section d(sigma)/d(cos theta) for
    e+e- -> mu+mu-, including the forward-backward asymmetry term."""
    normalization = 2. * (1. + 1. / 3.)
    asymmetry = a_fb(sqrtshalf, gf)
    return ((1 + costheta ** 2) + asymmetry * costheta) / normalization
def a_fb(sqrtshalf, gf):
    """Forward-backward asymmetry as a function of the beam half-energy
    and the Fermi constant, relative to the nominal G_f = 0.9."""
    Z_MASS = 90
    GF_NOMINAL = 0.9
    sqrts = sqrtshalf * 2.
    energy_term = np.tanh((sqrts - Z_MASS) / Z_MASS * 10)
    coupling_term = gf / GF_NOMINAL
    return 2 * energy_term * coupling_term
# Show the different distributions of cos(A) for different beam energies.
for energy in range(40, 51, 5):
theta = np.array([energy, fermi_constant])
X_observed = simulator(theta, 100000, random_state=random_number_generator)
plot_observations(X_observed, theta)
# Obtain the samples from the experiment.
p_r_x = simulator(theta_true, 100000, random_state=random_number_generator)
# Draw the distribution.
plot_observations(p_r_x, theta_true)
# Obtain the samples from the experiment.
p_r_x = simulator([42, 1.6], 100000, random_state=random_number_generator)
# Draw the distribution.
plot_observations(p_r_x, theta_true)
# Initialize prior of p(theta | phi).
p_theta_phi = {'mu': [], 'sigma': []}
# Sample a random Gaussian for the beam energy.
g = random_gaussian(mu=[30, 60], sigma=1.0)
p_theta_phi['mu'].append(g['mu'])
p_theta_phi['sigma'].append(g['sigma'])
# Sample a random Gaussian for Fermi's paradox.
g = random_gaussian(mu=[0, 2], sigma=1.0)
p_theta_phi['mu'].append(g['mu'])
p_theta_phi['sigma'].append(g['sigma'])
# Obtain the parameterization of the beam energy prior.
prior_beam_energy_mu = p_theta_phi['mu'][0]
prior_beam_energy_sigma = p_theta_phi['sigma'][0]
prior_beam_energy_variance = prior_beam_energy_sigma ** 2
# Draw the prior over the beam energy.
x = np.linspace(30, 60, 1000)
plt.plot(x, mlab.normpdf(x, prior_beam_energy_mu, prior_beam_energy_sigma))
plt.xlim([30, 60])
plt.ylim([0, 2])
plt.xlabel("GeV")
plt.grid(True)
plt.title(r"Prior $p(E_{beam}|\Psi)$")
plt.show()
# Obtain the parameterization of Fermi's constant prior.
prior_gf_mu = p_theta_phi['mu'][1]
prior_gf_sigma = p_theta_phi['sigma'][1]
prior_gf_variance = prior_gf_sigma ** 2
# Draw the prior over Fermi's constant.
x = np.linspace(-2, 3, 1000)
plt.plot(x, mlab.normpdf(x, prior_gf_mu, prior_gf_sigma))
plt.xlim([-2, 3])
plt.ylim([0, 2])
plt.grid(True)
plt.title(r"Prior $p(G_f|\Psi)$")
plt.show()
thetas = draw_gaussian(p_theta_phi, batch_size // 2, random_state=random_number_generator)
theta_beam_energy = thetas[:, 0]
theta_fermi_constant = thetas[:, 1]
# Plot the histogram of the sampled beam energy.
plt.title(r"$E_{beam}$ histogram from $p(\theta|\psi)$")
plt.hist(theta_beam_energy, bins=100)
plt.grid(True)
plt.xlabel(r"$E_{beam}$ in GeV")
plt.ylabel("Number of occurences")
plt.show()
# Plot the histogram of the sampled Fermi's constant.
plt.title(r"$G_f$ histogram from $p(\theta|\psi)$")
plt.hist(theta_fermi_constant, bins=100)
plt.grid(True)
plt.xlabel(r"$G_f$")
plt.ylabel("Number of occurences")
plt.show()
# Create input-vector of generated features under theta (E_beam and G_f).
_x_generated = np.zeros((batch_size // 2, num_features))
# Apply the simulator to generate the samples.
for i, theta in enumerate(thetas):
sample = simulator(theta, 1, random_state=random_number_generator)
_x_generated[i, :] = sample
# Create input-vector of real features.
_x_real = np.zeros((batch_size // 2, num_features))
num_observations = len(p_r_x)
# Sample randomly from observed distribution.
for i in range(0, batch_size // 2):
index = random_number_generator.randint(low=0, high=num_observations)
sample = p_r_x[index]
_x_real[i, :] = sample.ravel()
# Construct the input vector.
x = np.vstack([_x_generated, _x_real])
# Construct the expected output of the critic.
y_expected = np.zeros((batch_size, 1))
# Set the real samples to 1.
y_expected[batch_size // 2:] = 1.0
class Critic(torch.nn.Module):
    """Small MLP discriminator: num_features -> num_hidden -> num_hidden -> 1,
    with a sigmoid output in (0, 1)."""

    def __init__(self, num_features, num_hidden):
        super(Critic, self).__init__()
        self.fc_1 = torch.nn.Linear(num_features, num_hidden)
        self.fc_2 = torch.nn.Linear(num_hidden, num_hidden)
        self.fc_3 = torch.nn.Linear(num_hidden, 1)

    def forward(self, x):
        """Map a (batch, num_features) tensor to (batch, 1) scores in (0, 1)."""
        x = F.relu(self.fc_1(x))
        x = F.relu(self.fc_2(x))
        # F.sigmoid was deprecated and later removed; torch.sigmoid is the
        # supported equivalent.
        x = torch.sigmoid(self.fc_3(x))
        return x
# Define the critic with the specified number of hidden neurons.
num_hidden = 200
# Allocate the Critic with the specified configuration.
critic = Critic(num_features, num_hidden)
# Convert the Numpy array to a PyTorch Variable
x = torch.autograd.Variable(torch.from_numpy(x).float(), requires_grad=False)
y_expected = torch.autograd.Variable(torch.from_numpy(y_expected).float(), requires_grad=False)
# Allocate the Critic with the specified configuration.
critic = Critic(num_features, num_hidden)
# Do the prediction.
y = critic(x)
# Plot the histogram of the prediction.
plt.hist(y.data.numpy(), bins=100, normed=True)
plt.grid(True)
plt.xlabel(r"Prediction Distribution")
plt.ylabel("Number of Events")
plt.title("Prediction Histogram")
plt.show()
_x_r = Variable(torch.from_numpy(_x_real).float(), requires_grad=False)
_x_g = Variable(torch.from_numpy(_x_generated).float(), requires_grad=False)
def compute_gradient_penalty(net, real, fake, gp_lambda=5.0):
    """WGAN-GP gradient penalty on random interpolates of real/fake samples.

    Args:
        net: critic module mapping (batch, features) inputs to scalar scores.
        real: tensor of real samples, shape (batch, features).
        fake: tensor of generated samples, same shape as `real`.
        gp_lambda: penalty coefficient (default 5.0, matching the original
            hard-coded value).

    Returns:
        Per-sample penalty tensor of shape (batch,).
    """
    # Fix: derive the interpolation size from the input instead of the
    # module-level `batch_size // 2`, which broke for any other batch size.
    batch = real.size(0)
    epsilon = torch.rand(batch, 1)
    x_hat = epsilon * real + ((1. - epsilon) * fake)
    x_hat = Variable(x_hat, requires_grad=True)
    y_hat = net(x_hat)
    gradients = torch.autograd.grad(outputs=y_hat, inputs=x_hat,
                                    grad_outputs=torch.ones(y_hat.size()),
                                    create_graph=True, retain_graph=True,
                                    only_inputs=True)[0]
    g = gradients + 1e-16  # avoid a zero gradient norm in the backward pass
    gradient_penalty = gp_lambda * ((g.norm(2, dim=1) - 1) ** 2)
    return gradient_penalty
optimizer = torch.optim.Adam(critic.parameters(), lr=0.0001)
for i in range(0, 10000):
# Reset the gradients.
critic.zero_grad()
# Train with real.
y_r = critic(_x_r)
# Train with fake.
y_g = critic(_x_g)
# Train with gradient penalty.
gp = compute_gradient_penalty(critic, _x_r.data, _x_g.data)
loss = y_g - y_r + gp
loss.mean().backward()
optimizer.step()
if i % 1000 == 0:
gc.collect()
gc.collect()
print("Wasserstein: " + str((y_g - y_r).mean().data.numpy()))
print("Gradient Penalty: " + str(gp.mean().data.numpy()))
print("Loss: " + str(loss.mean().data.numpy()))
# Do the prediction.
y = critic(x)
# Plot the histogram of the prediction.
plt.hist(y.data.numpy(), bins=100)
plt.grid(True)
plt.xlabel(r"Prediction Distribution")
plt.ylabel("Number of Events")
plt.title("Prediction Histogram")
plt.show()
new = plt.scatter(x.data.numpy(), y.data.numpy())
old = plt.scatter(x.data.numpy(), y_old.data.numpy())
plt.legend([new, old], ['t', 't - 1'])
plt.xlabel(r"$\cos(A)$")
plt.ylabel("Likelihood Classified as Real")
plt.title("Likelihood of being part of the distribution given input.")
plt.xlim([-1, 1])
plt.grid(True)
plt.show()
y_old = y
correct = 0
for e in y_r.data.numpy():
if e > 0.5:
correct += 1
for e in y_g.data.numpy():
if e < 0.5:
correct += 1
print("Number of correct instances: " + str(correct))
print("Accuracy: " + str(correct / batch_size * 100) + "%")
# Obtain the parameterization of Fermi's constant prior.
prior_gf_mu = p_theta_phi['mu'][1]
prior_gf_sigma = p_theta_phi['sigma'][1]
prior_gf_variance = prior_gf_sigma ** 2
# Draw the prior over the beam enery.
x = np.linspace(-2, 3, 1000)
plt.plot(x, mlab.normpdf(x, prior_gf_mu, prior_gf_sigma))
plt.xlim([-2, 3])
plt.ylim([0, 2])
plt.grid(True)
plt.title(r"Prior $p(G_f|\Psi)$")
plt.show()
# Obtain the parameterization of the beam energy prior.
prior_beam_energy_mu = p_theta_phi['mu'][0]
prior_beam_energy_sigma = p_theta_phi['sigma'][0]
prior_beam_energy_variance = prior_beam_energy_sigma ** 2
# Draw the prior over the beam enery.
x = np.linspace(30, 60, 1000)
plt.plot(x, mlab.normpdf(x, prior_beam_energy_mu, prior_beam_energy_sigma))
plt.xlim([30, 60])
plt.ylim([0, 2])
plt.xlabel("GeV")
plt.grid(True)
plt.title(r"Prior $p(E_{beam}|\Psi)$")
plt.show()
# Sample fake points from the simulator.
thetas = draw_gaussian(p_theta_phi, batch_size // 2, random_state=random_number_generator)
num_thetas = len(thetas)
_x_fake = np.zeros((batch_size // 2, num_features))
for i in range(0, num_thetas):
_x_fake[i, :] = simulator(thetas[i], 1, random_state=random_number_generator)
# Sample real points from the observed data.
_x_real = np.zeros((batch_size // 2, num_features))
num_observations = len(p_r_x)
# Sample randomly from observed distribution.
for i in range(0, batch_size // 2):
index = random_number_generator.randint(low=0, high=num_observations)
sample = p_r_x[index]
_x_real[i, :] = sample.ravel()
# Convert the samples to PyTorch variables.
x_f = Variable(torch.from_numpy(_x_fake)).float()
x_r = Variable(torch.from_numpy(_x_real)).float()
# Obtain the forward pass of both variables.
y_f = critic(x_f)
y_r = critic(x_r)
def approx_grad_u(params_proposal, i):
    """Monte-Carlo estimate of the gradient of the AVO upper bound U with
    respect to the proposal-distribution parameters.

    NOTE(review): relies on several module-level names not defined in this
    chunk (`gaussian_draw`, `simulator`, `predict`, `grad_gaussian_logpdf`,
    `grad_gaussian_entropy`, `state`, `batch_size`) -- confirm they exist
    at call time.

    Args:
        params_proposal: dict of proposal parameters (e.g. 'mu', 'sigma'),
            each mapping to a per-dimension sequence.
        i: integer used to seed this call's random state.

    Returns:
        dict with the same keys as `params_proposal`, holding the averaged
        score-function gradient plus the entropy-regularization term.
    """
    rng = check_random_state(i)
    # One accumulator slot per proposal-parameter dimension.
    grad_u = {k: np.zeros(len(params_proposal[k]))
              for k in params_proposal}
    grad_ent = {k: np.zeros(len(params_proposal[k]))
                for k in params_proposal}
    thetas = gaussian_draw(params_proposal, batch_size, random_state=rng)
    for theta in thetas:
        x = simulator(theta, 1, random_state=rng)
        dx = predict(x, state["params_critic"]).ravel()
        grad_q = grad_gaussian_logpdf(params_proposal, theta)
        # Score-function (REINFORCE) estimator: weight grad log q(theta)
        # by the critic's output.
        for k, v in grad_q.items():
            grad_u[k] += -dx * v
    # Entropy-regularization gradient (independent of theta, so computed
    # once per call).
    grad_entropy = grad_gaussian_entropy(params_proposal)
    for k, v in grad_entropy.items():
        grad_ent[k] += v
    M = len(thetas)
    for k in grad_u:
        # Average the data term over the M draws and add the entropy term
        # weighted by the annealing coefficient gamma.
        grad_u[k] = 1. / M * grad_u[k] + state["gamma"] * grad_ent[k]
    return grad_u
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Complex computer simulators are increasingly used across fields of science as generative models tying parameters of an underlying theory to experimental observations. Inference in this setup is often difficult, as simulators rarely admit a tractable density or likelihood function. We introduce Adversarial Variational Optimization (AVO), a likelihood-free inference algorithm for fitting a non-differentiable generative model incorporating ideas from empirical Bayes and variational inference. We adapt the training procedure of generative adversarial networks by replacing the differentiable generative network with a domain-specific simulator. We solve the resulting non-differentiable minimax problem by minimizing variational upper bounds of the two adversarial objectives. Effectively, the procedure results in learning a proposal distribution over simulator parameters, such that the corresponding marginal distribution of the generated data matches the observations. We present results of the method with simulators producing both discrete and continuous data.
Step2: Globals
Step3: Utility Methods
Step4: Introduction
Step5: Experimental Data
Step6: Prior Initialization
Step7: Sample $\theta$'s from prior
Step8: Now we have sampled some thetas (both $E_{beam}$ and $G_f$ that have to be simulated in order to make a prediction, we can plot them to check if the match our prior over those parameters.
Step9: Constructing training data
Step10: Of course, adverserial training requires "true" samples as well. In the current approach, this is done be sampling from the true experimental information, i.e., observed data.
Step11: Vertically stack the generated and real vector to form a single training batch, and finally, construct an expected output vector to construct the loss of the critic.
Step12: Model
Step13: Forward pass of the critic
Step14: Backward pass of the critic
Step15: Proposal Distribution Update
Step16: Update
|
7,679
|
<ASSISTANT_TASK:>
Python Code:
print hex(1024)
print round(5.23222,2)
s = 'hello how are you Mary, are you feeling okay?'
retVal = 1
for word in s.split():
print word
for item in word:
# print item
if not item.islower():
# print item
print 'The string has Uppercase characters'
retVal = 0
break
print retVal
s.islower()
s = 'twywywtwywbwhsjhwuwshshwuwwwjdjdid'
s.count('w')
set1 = {2,3,1,5,6,8}
set2 = {3,1,7,5,6,8}
set1.difference(set2)
set1.intersection(set2)
{ val:val**3 for val in xrange(0,5)}
l = [1,2,3,4]
l[::-1]
l = [3,4,2,5,1]
sorted(l)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 2
Step2: Advanced Strings
Step3: Problem 4
Step4: Advanced Sets
Step5: Problem 6
Step6: Advanced Dictionaries
Step7: Advanced Lists
Step8: Problem 9
|
7,680
|
<ASSISTANT_TASK:>
Python Code:
import graphlab as gl
sf = gl.SFrame('data/people-example.csv')
sf # we can view first few lines of the table
sf.head()
sf.tail()
sf['Country']
sf['age'].mean()
sf
sf['Full Name'] = sf['First Name'] + ' ' + sf['Last Name']
sf
sf['Country']
def transform_country(country):
    """Expand the abbreviation 'USA' to 'United States'; return every
    other value unchanged."""
    if country == 'USA':
        return 'United States'
    return country
transform_country('USA')
transform_country('India')
sf['Country'] = sf['Country'].apply(transform_country)
sf
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load a tabular dataset
Step2: SFrame basics
Step3: Inspect Dataset
Step4: Creating new columns
Step5: Apply Function for Data transformation
|
7,681
|
<ASSISTANT_TASK:>
Python Code:
def power(x, y, p):
    """Return (x ** y) % p via binary (square-and-multiply) exponentiation.

    Mirrors the original's convention: if x is divisible by p the result
    is 0 regardless of y, so power(p, 0, p) == 0 even though Python's
    built-in pow(p, 0, p) == 1.
    """
    base = x % p
    if base == 0:
        return 0
    result = 1
    exponent = y
    while exponent > 0:
        # Multiply in the current square whenever the low bit is set.
        if exponent & 1:
            result = (result * base) % p
        exponent >>= 1
        base = (base * base) % p
    return result
N = 3
if(( N == 1 ) or(N == 2 ) ) :
print(62 )
else :
m =(10 ** 9 ) + 7
if(N % 2 == 0 ) :
k = N // 2
flag = True
else :
k =(N - 1 ) // 2
flag = False
if(flag ) :
a = power(62 , k , m )
print(a )
else :
a = power(62 ,(k + 1 ) , m )
print(a )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,682
|
<ASSISTANT_TASK:>
Python Code:
def pet_names(name1, name2):
    """Print both pet names, one labeled line each."""
    for label, name in (("Pet 1: ", name1), ("Pet 2: ", name2)):
        print(label, name)
pet1 = "King"
pet2 = "Reginald"
pet_names(pet1, pet2) # pet1 variable, then pet2 variable
pet_names(pet2, pet1) # notice we've switched the order in which they're passed to the function
pet1 = "Rocco"
pet2 = "Lucy"
pet_names(name1 = pet1, name2 = pet2)
pet_names(name2 = pet2, name1 = pet1)
# Here's our function with a default argument.
# x comes first (required), y comes second (default)
def pos_def(x, y=10):
    """Return x + y, where y defaults to 10 (positional/default-arg demo)."""
    total = y + x
    return total
# Here, we've specified both arguments, using the keyword format.
z = pos_def(x = 10, y = 20)
print(z)
# We're still using the keyword format, which allows us to reverse their ordering.
z = pos_def(y = 20, x = 10)
print(z)
# But *only* specifying the default argument is a no-no.
z = pos_def(y = 20)
print(z)
def make_pizza(*toppings):
    """Print the requested toppings (variadic *args demo)."""
    print("Making a pizza with the following toppings:")
    for item in toppings:
        print(" - ", item)
make_pizza("pepperoni")
make_pizza("pepperoni", "banana peppers", "green peppers", "mushrooms")
def build_profile(**user_info):
    """Return a dict containing every keyword argument supplied (**kwargs demo)."""
    return dict(user_info)
profile = build_profile(firstname = "Shannon", lastname = "Quinn", university = "UGA")
print(profile)
profile = build_profile(name = "Shannon Quinn", department = "Computer Science")
print(profile)
def build_better_profile(firstname, lastname, *nicknames, **user_info):
    """Build a profile dict from required names, arbitrary nicknames, and
    any extra keyword fields (combined *args/**kwargs demo)."""
    profile = {'First Name': firstname, 'Last Name': lastname}
    profile.update(user_info)
    profile['Nicknames'] = nicknames
    return profile
profile = build_better_profile("Shannon", "Quinn", "Professor", "Doctor", "Master of Science",
department = "Computer Science", university = "UGA")
for key, value in profile.items():
print(key, ": ", value)
def magic_function(x):
    """Rebind the parameter locally and print it.

    Pedagogical demo: reassigning a parameter inside a function does not
    change the caller's variable (only the local name is rebound).
    """
    x = 20
    print("Inside function: ", x)
x = 10
print("Before function: ", x)
magic_function(x)
print("After function: ", x)
def magic_function2(x):
    """Mutate the first element of the passed-in list and print it.

    Pedagogical demo: in-place mutation of a mutable argument IS visible
    to the caller, because both names reference the same list object.
    """
    x[0] = 20
    print("Inside function: {}".format(x))
x = [10, 10]
print("Before function: {}".format(x))
magic_function2(x)
print("After function: ", x)
some_list = [1, 2, 3]
# some_list -> reference to my list
# [1, 2, 3] -> the actual, physical list
def set_to_none(some_list):
    """Rebind the local reference to None and print it.

    Pedagogical demo: rebinding the parameter does not affect the caller's
    list -- only the local name stops pointing at it.
    """
    some_list = None # sets the reference "some_list" to point at nothing
    print("In function: ", some_list)
a_list = [1, 2, 3]
print("Before function: {}".format(a_list))
set_to_none(a_list)
print(a_list)
def modify_int(x):
    """Rebind the integer parameter locally (no effect on the caller)."""
    x = 9238493 # Works as long as we're in this function...once we leave, it goes away.
x = 10
modify_int(x)
print(x) # Even though we set x to some other value in the function, that was only a copy
def modify_list(x):
    """Mutate element 0 of the passed-in list (change is visible to the caller)."""
    x[0] = 9238493 # Here, we're modifying a specific part of the object, so this will persist!
a_list = [1, 2, 3]
print(a_list) # Before
modify_list(a_list)
print(a_list) # After
x = [1, 2, 3] # Create a list, assign it to x.
y = x # Assign a new variable y to x.
x.append(4) # Append an element to the list x.
print(y) # Print ** y **
x = 5 # reassign x
print(x)
print(y) # same as before!
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this example, we switched the ordering of the arguments between the two function calls; consequently, the ordering of the arguments inside the function were also flipped. Hence, positional
Step2: As you can see, we used the names of the arguments from the function header itself (go back to the previous slide to see the definition of pet_names if you don't remember), setting them equal to the variable we wanted to use for that argument.
Step3: Part 2
Step4: Inside the function, the arguments are basically treated as a list
Step5: Instead of one star (*) in the function header, there are two (**). And yes, instead of a list when we get to the inside of the function, now we basically have a dictionary!
Step6: That last example had pretty much everything.
Step7: If we wrote another print statement, what would print out? 10? 20? Something else?
Step8: It prints 10. Before explaining, let's take another example.
Step9: What would a print statement now print out? [10, 10]? [20, 10]? Something else?
Step10: It prints [20, 10].
Step11: Whenever you operate on some_list, you have to traverse the "arrow" to the object itself, which is separate. Again, think of the house analogy
Step12: Now the function has finished; if we print out the list, what will we get?
Step13: "But," you begin, "you said objects like lists are pass-by-reference, and therefore any changes made in functions are permanent!"
Step14: When it comes to more "complicated" data types--strings, lists, dictionaries, sets, tuples, generators--we have to deal with two parts
Step15: Think of it this way
Step16: Notice how we called append on the variable x, and yet when we print y, we see the 4 there as well!
|
7,683
|
<ASSISTANT_TASK:>
Python Code:
#We can import the obspy module, and give it a shorter name (PS as you can see, the # symbol starts a comment)
import obspy as op #note we can start a comment after a line that has executable code on it.
#matplotlib is a large plotting package, with a submodule pyplot that we can import by itself like this
import matplotlib.pyplot as plt
#normally plots open in their own window, but we want a nice cohesive document - we can set inline plots by using
#the %matplotlib magic command (a magic command is not part of standard python, but instead a directive to the ipython
#shell that runs under the hood of Jupyter, that automatically configures matplotlib correctly).
%matplotlib inline
#Another useful magic command is %timeit, which runs tests to see how long it takes a particular statement to execute
#for example:
%timeit 1+1
#will tell us how long it takes Python to add two integers on your computer (not very long!).
#These magic commands will cause an error if you include them in a normal Python script, but they are very useful for
#Jupyter notebooks and IPython sessions.
import numpy as np #it is tradition to import numpy as np
import math #math is the basic python math library - it does not allow vector operations
#Square brackets create a list in Python
a_list = [1, 2, 3, 4]
#We can turn it into an array like so
a_numpy_array = np.array(a_list)
#We can also specify an array manually:
an_identity_array = np.array([[1, 0], [0, 1]])
#Parentheses create a tuple, which is an immutable data structure - useful for data we don't want to change
dims = (2, 4)
#Numpy has lots of matrix creation functionality - for instance, lets create a random matrix with dimensions
#given by dims. The documentation for numpy random normal is at
#https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.normal.html - give this a look!
a_random_2d_array = np.random.normal(1, 1, dims)
#Double quotes denote a string - textual information. We can use format statements like below to inject variables
#into strings.
print("A list: {0}".format(a_list))
print("An array: {0}".format(a_numpy_array))
print("An identity array: \n {0}".format(an_identity_array))
#note we have used a newline character \n to force the array onto a new line to make the printing nicer.
print("A random matrix: \n{0}".format(a_random_2d_array))
soon_to_be_not_an_identity_array = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
#set the last column (index 2) to 5
soon_to_be_not_an_identity_array[:, 2] = 5
print("Our new array is \n{0}".format(soon_to_be_not_an_identity_array))
#try clauses allow us a way to use code that may or may not work and deal with any errors they cause
try:
cos_a_list = math.cos(a_list)
print("cos of the list {0} = {1}".format(a_list, cos_a_list))
#we can catch a specific type of error as so - in this case, only TypeErrors will be handled, and anything else will
#crash the code; for more info see https://docs.python.org/3.5/tutorial/errors.html
except TypeError as e:
print("The math module doesn't operate on lists, only scalars!")
print("The cause of the exception is: {0}".format(e))
try:
cos_a_numpy_array = np.cos(a_numpy_array)
print("cos of the array {0} = {1}".format(a_numpy_array, cos_a_numpy_array))
except TypeError as e:
print("You won't see this message, as numpy operates on multidimensional arrays.")
print("The cause of the exception is: {0}".format(e))
#in python 3.5 + , we can use the @ symbol to perform matrix multiplication between numpy arrays:
#rank 1 arrays (like a_numpy_array) will be interpreted as either a row or column vector depending on context
#but if neither works,
try:
a_malformed_product = a_numpy_array @ a_random_2d_array
print("Matrix product between a 4-element vector {0} and a 2x4 matrix {1} = {2}".format(a_numpy_array,
a_random_2d_array,
a_malformed_product))
except ValueError as e:
print("Multiplying a 4-element vector and a 2x4 matrix is not a well defined operation")
print("The cause of the exception is: {0}".format(e))
try:
a_good_product = a_random_2d_array @ a_numpy_array
print("Matrix product between a 2x4 matrix \n {1} \n and a 4-element vector {0} = {2}".format(a_numpy_array,
a_random_2d_array,
a_good_product))
except ValueError as e:
print("You won't see this message, as we have performed a good matrix operation")
print("The cause of the exception is: {0}".format(e))
#linspace creates an array spanning an interval:
x = np.linspace(0, 2*np.pi)
y1 = np.sin(x)+x
#we can specify a label for a legend by using a keyword argument - these are
plt.plot(x, y1, label='Line 1')
#two asterisks (**) is exponentiation in Python
y2 = np.cos(x**2)
#we can specify particular formatting like so
plt.plot(x, y2, color='red', linestyle='dashed', label='Line 2')
#we can set the plot limits
plt.xlim(0, 2*np.pi)
#if we have given our lines labels, as we have, we can easily add a legend
plt.legend(loc="upper left")
#and we can add labels
plt.xlabel("x (m)", fontsize=16)
plt.ylabel("y (m)", fontsize=16)
x = np.random.uniform(0,10,100)
y = 1.5*x + np.random.normal(0,1,100)
plt.scatter(x,y)
plt.scatter(x, y)
plt.plot(x, G_mat@np.array([0, 1.5]))
plt.plot(x, G_mat@np.linalg.inv(G_mat.T@G_mat)@G_mat.T@y)
G_mat.T @ y
Our catalogue possibly contains multiple events, each with possibly multiple origin solutions.
Since we know our catalogue only has one event it must occupy index 0, and likewise we are interested in the
first origin solution. So we can get the event origin time by
evt_st = maule_catalogue[0].origins[0].time
Obtaining seismograms is similar to getting the catalogue; we specify the seismic network/station/location/channel,
and start/end times. In this case, we are interested in CI.USC..BH(ENZ) channels - that is, the Southern California
Seismic Network (CI) permanent station at the University of Southern California (USC).
We don't care about the "location" parameter as we know a priori that CI network stations don't have them, and
we want broadband high gain (BH) for all 3 components (we use the * wildcard to do this - alternatively
we could manually write BHZ,BHN,BHE)
seismograms = iris_client.get_waveforms(network = "CI",
station = "USC",
location = "",
channel = "BH*",
starttime = evt_st,
endtime = evt_st+3600)
#If the data is very complex, it can be sometimes useful to make lines thinner so you can see finer details
seismograms.plot(linewidth=0.5)
#Basically everything in Python is an object on which you can call methods; since these generally
#also return objects, we can call methods on those etc, allowing us to chain together operations like
#copying and filtering as shown below:
hf_seismograms = seismograms.copy().filter('bandpass', freqmin=0.5, freqmax=10)
hf_seismograms.plot(linewidth=0.5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: One of the key cornerstones of the scientific Python platform is Numpy, which is a highly optimized library for multidimensional arrays. It allows us to perform vectorized operations, which is not a standard feature of Python. In the next section, there will be a lot of information, but the key thing to keep in focus is that Numpy allows us to operate on arrays.
Step2: We can index Numpy arrays by slicing them, allowing us to extract and reassign elements at will. When slicing, a colon represents a range. The details of slicing are extensive, and you can review them at https
Step3: Numpy arrays, unlike lists, allow us to apply mathematical operations to all of their elements without writing loops.
Step4: It can be useful to think of 2d arrays as matrices, but numpy arrays can have as many dimensions as we find useful
Step5: Since we have specified matplotlib to be inline, we can easily plot things as so
Step8: Downloading & Processing Waveforms
Step9: Lets plot the data to see what it looks like
Step10: Task 1.2
|
7,684
|
<ASSISTANT_TASK:>
Python Code:
import pylab as plt
import numpy as np
%matplotlib inline
from __future__ import division
from scipy.integrate import odeint,ode
from numpy import zeros,ones,eye,tanh,dot,outer,sqrt,linspace,cos,pi,hstack,zeros_like,abs,repeat
from numpy.random import uniform,normal,choice
%config InlineBackend.figure_format = 'retina'
def f1(x,t0):
    """Autonomous rate dynamics dx/dt = -x + g*J*tanh(x) (odeint RHS).

    ``t0`` is required by odeint's calling convention but unused.
    Reads module-level globals: gain ``g`` (set in the plotting loop
    below) and the random coupling matrix ``J``.
    """
    return -x + g*dot(J,tanh(x))
N = 500
J = normal(0,sqrt(1/N),(N,N))
x0 = uniform(-0.5,0.5,N)
t = linspace(0,50,500)
plt.figure(figsize=(10,5))
for s,g in enumerate(linspace(0.5,2,3)):
plt.subplot(1,3,s+1)
x = odeint(f1,x0,t)
plt.plot(t,x[:,choice(N,10)])
plt.title('g = '+str(g),fontweight='bold')
plt.show()
target = lambda t0: cos(2 * pi * t0 / 50) # target pattern
def f3(t0, x, tanh_x):
    """FORCE network dynamics: dx/dt = -x + g*J*tanh_x + (w . tanh_x) * u.

    ``tanh_x`` (the cached tanh of the previous state) is supplied via
    ``solver.set_f_params`` in the training loop below, so it is not
    recomputed inside the integrator. ``g``, ``J``, ``w`` and ``u`` are
    module-level globals; ``w`` is the readout trained online by FORCE.
    """
    return -x + g * dot(J, tanh_x) + dot(w, tanh_x) * u
dt = 1 # time step
tmax = 1000 # simulation length
tstop = 600
N = 300
J = normal(0, sqrt(1 / N), (N, N))
x0 = uniform(-0.5, 0.5, N)
g = 1.5
u = uniform(-1, 1, N)
w = uniform(-1 / sqrt(N), 1 / sqrt(N), N) # initial weights
P = eye(N) # Running estimate of the inverse correlation matrix
lr = 1 # learning rate
# simulation data: state, output, time, weight updates
x, z, t, wu = [x0], [], [0], [0]
# Set up ode solver
solver = ode(f3)
solver.set_initial_value(x0)
# Integrate ode, update weights, repeat
while t[-1] < tmax:
tanh_x = tanh(x[-1]) # cache
z.append(dot(w, tanh_x))
error = target(t[-1]) - z[-1]
q = dot(P, tanh_x)
c = lr / (1 + dot(q, tanh_x))
P = P - c * outer(q, q)
w = w + c * error * q
# Stop leaning here
if t[-1] > tstop:
lr = 0
solver.set_f_params(tanh_x)
wu.append(np.sum(np.abs(c * error * q)))
solver.integrate(solver.t + dt)
x.append(solver.y)
t.append(solver.t)
# last update for readout neuron
z.append(dot(w, tanh_x))
x = np.array(x)
t = np.array(t)
plt.figure(figsize=(10, 5))
plt.subplot(2, 1, 1)
plt.plot(t, target(t), '-r', lw=2)
plt.plot(t, z, '-b')
plt.legend(('target', 'output'))
plt.ylim([-1.1, 3])
plt.xticks([])
plt.subplot(2, 1, 2)
plt.plot(t, wu, '-k')
plt.yscale('log')
plt.ylabel('$|\Delta w|$', fontsize=20)
plt.xlabel('time', fontweight='bold', fontsize=16)
plt.show()
J[0,0]
for i in range(20):
plt.plot(t[:200], x[:,i][:200]);
def decode(x, rho):
    """Ternarize analog rates: +1 above ``rho``, -1 below ``-rho``, else 0.

    ``rho`` may be a scalar or a per-unit array of thresholds; the output
    has the same shape and dtype as ``x``.
    """
    above = x > rho
    below = x < -rho
    code = zeros_like(x)
    code[above] = 1
    code[below] = -1
    return code
def f1(x,t0):
    """Autonomous rate dynamics dx/dt = -x + g*J*tanh(x) (odeint RHS).

    Identical to the earlier definition; redefined here so the second
    experiment's globals ``g`` and ``J`` are picked up. ``t0`` is
    required by odeint but unused.
    """
    return -x + g*dot(J,tanh(x))
N = 500
J = normal(0,sqrt(1/N),(N,N))
x0 = uniform(-0.5,0.5,N)
t = linspace(0,50,500)
rho = uniform(0,0.1,N) # Rand thresholds!
# rho = 0.5 # fixed threshold!
plt.figure(figsize=(10,5))
for s,g in enumerate(linspace(0.5,1.5,3)):
plt.subplot(1,3,s+1)
x = odeint(f1,x0,t)
xd = decode(x, rho)
plt.plot(t,xd[:,choice(N,10)])
plt.title('g = '+str(g),fontweight='bold')
plt.ylim(-2,2)
plt.show()
target = lambda t0: cos(2 * pi * t0 / 50) # target pattern
def f3(t0, x):
    """Network dynamics for the binary-code experiment (scipy ``ode`` RHS).

    NOTE(review): unlike the first experiment, ``tanh_x`` here is the
    module-level global refreshed once per outer loop iteration — there
    is no ``solver.set_f_params`` call below — so the drive term is held
    at the previous state's tanh rather than tanh(x) during integration;
    confirm this lag is intended.
    """
    return -x + g * dot(J, tanh_x) + dot(w, tanh_x) * u
dt = 1 # time step
tmax = 1000 # simulation length
tstop = 600
N = 300
J = normal(0, sqrt(1 / N), (N, N))
x0 = uniform(-0.5, 0.5, N)
g = 1.0
u = uniform(-1, 1, N)
w = uniform(-1 / sqrt(N), 1 / sqrt(N), N) # initial weights
P = eye(N) # Running estimate of the inverse correlation matrix
lr = .4 # learning rate
rho = repeat(0.05, N)
# simulation data: state,
# output, time, weight updates
x, z, t, wu = [x0], [], [0], [0]
# Set up ode solver
solver = ode(f3)
solver.set_initial_value(x0)
# Integrate ode, update weights, repeat
while t[-1] < tmax:
tanh_x = tanh(x[-1])
tanh_xd = decode(tanh_x, rho) # BINARY CODE INTRODUCED HERE!
z.append(dot(w, tanh_xd))
error = target(t[-1]) - z[-1]
q = dot(P, tanh_xd)
c = lr / (1 + dot(q, tanh_xd))
P = P - c * outer(q, q)
w = w + c * error * q
# Stop training time
if t[-1] > tstop:
lr = 0
wu.append(np.sum(np.abs(c * error * q)))
solver.integrate(solver.t + dt)
x.append(solver.y)
t.append(solver.t)
# last update for readout neuron
z.append(dot(w, tanh_x))
# plot
x = np.array(x)
t = np.array(t)
plt.figure(figsize=(10, 5))
plt.subplot(2, 1, 1)
plt.plot(t, target(t), '-r', lw=2)
plt.plot(t, z, '-b')
plt.legend(('target', 'output'))
plt.ylim([-1.1, 3])
plt.xticks([])
plt.subplot(2, 1, 2)
plt.plot(t, wu, '-k')
plt.yscale('log')
plt.ylabel('$|\Delta w|$', fontsize=20)
plt.xlabel('time', fontweight='bold', fontsize=16)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: FORCE
Step2: The loss function
Step3: FORCE does a pretty nice job learning how to be a sin wave. If you rerun this a few times, you'll see the quality of the fits varies. Such is life when working with randomness and chaos.
Step4: Here's what the binary version looks like. I use randomly (uniformly) selected thresholds to convert from rates to binary codes. I don't have any idea how this really works, so random seems as good a guess as any.
Step5: The binary loss function
|
7,685
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import theano
# By convention, the tensor submodule is loaded as T
import theano.tensor as T
# The theano.tensor submodule has various primitive symbolic variable types.
# Here, we're defining a scalar (0-d) variable.
# The argument gives the variable its name.
foo = T.scalar('foo')
# Now, we can define another variable bar which is just foo squared.
bar = foo**2
# It will also be a theano variable.
print type(bar)
print bar.type
# Using theano's pp (pretty print) function, we see that
# bar is defined symbolically as the square of foo
print theano.pp(bar)
# We can't compute anything with foo and bar yet.
# We need to define a theano function first.
# The first argument of theano.function defines the inputs to the function.
# Note that bar relies on foo, so foo is an input to this function.
# theano.function will compile code for computing values of bar given values of foo
f = theano.function([foo], bar)
print f(3)
# Alternatively, in some cases you can use a symbolic variable's eval method.
# This can be more convenient than defining a function.
# The eval method takes a dictionary where the keys are theano variables and the values are values for those variables.
print bar.eval({foo: 3})
# We can also use Python functions to construct Theano variables.
# It seems pedantic here, but can make syntax cleaner for more complicated examples.
def square(x):
    """Return the Theano symbolic expression for the element-wise square of x."""
    return x**2
bar = square(foo)
print bar.eval({foo: 3})
A = T.matrix('A')
x = T.vector('x')
b = T.vector('b')
y = T.dot(A, x) + b
# Note that squaring a matrix is element-wise
z = T.sum(A**2)
# theano.function can compute multiple things at a time
# You can also set default parameter values
# We'll cover theano.config.floatX later
b_default = np.array([0, 0], dtype=theano.config.floatX)
linear_mix = theano.function([A, x, theano.Param(b, default=b_default)], [y, z])
# Supplying values for A, x, and b
print linear_mix(np.array([[1, 2, 3],
[4, 5, 6]], dtype=theano.config.floatX), #A
np.array([1, 2, 3], dtype=theano.config.floatX), #x
np.array([4, 5], dtype=theano.config.floatX)) #b
# Using the default value for b
print linear_mix(np.array([[1, 2, 3],
[4, 5, 6]]), #A
np.array([1, 2, 3])) #x
shared_var = theano.shared(np.array([[1, 2], [3, 4]], dtype=theano.config.floatX))
# The type of the shared variable is deduced from its initialization
print shared_var.type()
# We can set the value of a shared variable using set_value
shared_var.set_value(np.array([[3, 4], [2, 1]], dtype=theano.config.floatX))
# ..and get it using get_value
print shared_var.get_value()
shared_squared = shared_var**2
# The first argument of theano.function (inputs) tells Theano what the arguments to the compiled function should be.
# Note that because shared_var is shared, it already has a value, so it doesn't need to be an input to the function.
# Therefore, Theano implicitly considers shared_var an input to a function using shared_squared and so we don't need
# to include it in the inputs argument of theano.function.
function_1 = theano.function([], shared_squared)
print function_1()
# We can also update the state of a shared var in a function
subtract = T.matrix('subtract')
# updates takes a dict where keys are shared variables and values are the new value the shared variable should take
# Here, updates will set shared_var = shared_var - subtract
function_2 = theano.function([subtract], shared_var, updates={shared_var: shared_var - subtract})
print "shared_var before subtracting [[1, 1], [1, 1]] using function_2:"
print shared_var.get_value()
# Subtract [[1, 1], [1, 1]] from shared_var
function_2(np.array([[1, 1], [1, 1]]))
print "shared_var after calling function_2:"
print shared_var.get_value()
# Note that this also changes the output of function_1, because shared_var is shared!
print "New output of function_1() (shared_var**2):"
print function_1()
# Recall that bar = foo**2
# We can compute the gradient of bar with respect to foo like so:
bar_grad = T.grad(bar, foo)
# We expect that bar_grad = 2*foo
bar_grad.eval({foo: 10})
# Recall that y = Ax + b
# We can also compute a Jacobian like so:
y_J = theano.gradient.jacobian(y, x)
linear_mix_J = theano.function([A, x, b], y_J)
# Because it's a linear mix, we expect the output to always be A
print linear_mix_J(np.array([[9, 8, 7], [4, 5, 6]]), #A
np.array([1, 2, 3]), #x
np.array([4, 5])) #b
# We can also compute the Hessian with theano.gradient.hessian (skipping that here)
# Let's create another matrix, "B"
B = T.matrix('B')
# And, a symbolic variable which is just A (from above) dotted against B
# At this point, Theano doesn't know the shape of A or B, so there's no way for it to know whether A dot B is valid.
C = T.dot(A, B)
# Now, let's try to use it
C.eval({A: np.zeros((3, 4)), B: np.zeros((5, 6))})
# This tells Theano we're going to use test values, and to warn when there's an error with them.
# The setting 'warn' means "warn me when I haven't supplied a test value"
theano.config.compute_test_value = 'warn'
# Setting the tag.test_value attribute gives the variable its test value
A.tag.test_value = np.random.random((3, 4))
B.tag.test_value = np.random.random((5, 6))
# Now, we get an error when we compute C which points us to the correct line!
C = T.dot(A, B)
# We won't be using test values for the rest of the tutorial.
theano.config.compute_test_value = 'off'
# A simple division function
num = T.scalar('num')
den = T.scalar('den')
divide = theano.function([num, den], num/den)
print divide(10, 2)
# This will cause a NaN
print divide(0, 0)
# To compile a function in debug mode, just set mode='DebugMode'
divide = theano.function([num, den], num/den, mode='DebugMode')
# NaNs now cause errors
print divide(0, 0)
# You can get the values being used to configure Theano like so:
print theano.config.device
print theano.config.floatX
# You can also get/set them at runtime:
old_floatX = theano.config.floatX
theano.config.floatX = 'float32'
# Be careful that you're actually using floatX!
# For example, the following will cause var to be a float64 regardless of floatX due to numpy defaults:
var = theano.shared(np.array([1.3, 2.4]))
print var.type() #!!!
# So, whenever you use a numpy array, make sure to set its dtype to theano.config.floatX
var = theano.shared(np.array([1.3, 2.4], dtype=theano.config.floatX))
print var.type()
# Revert to old value
theano.config.floatX = old_floatX
class Layer(object):
    """A single fully-connected network layer computing activation(W x + b)."""

    def __init__(self, W_init, b_init, activation):
        '''
        A layer of a neural network, computes s(Wx + b) where s is a nonlinearity and x is the input vector.
        :parameters:
            - W_init : np.ndarray, shape=(n_output, n_input)
                Values to initialize the weight matrix to.
            - b_init : np.ndarray, shape=(n_output,)
                Values to initialize the bias vector
            - activation : theano.tensor.elemwise.Elemwise
                Activation function for layer output
        '''
        # Retrieve the input and output dimensionality based on W's initialization
        n_output, n_input = W_init.shape
        # Make sure b is n_output in size
        assert b_init.shape == (n_output,)
        # All parameters should be shared variables.
        # They're used in this class to compute the layer output,
        # but are updated elsewhere when optimizing the network parameters.
        # Note that we are explicitly requiring that W_init has the theano.config.floatX dtype
        self.W = theano.shared(value=W_init.astype(theano.config.floatX),
                               # The name parameter is solely for printing purposes
                               name='W',
                               # Setting borrow=True allows Theano to use user memory for this object.
                               # It can make code slightly faster by avoiding a deep copy on construction.
                               # For more details, see
                               # http://deeplearning.net/software/theano/tutorial/aliasing.html
                               borrow=True)
        # We can force our bias vector b to be a column vector using numpy's reshape method.
        # When b is a column vector, we can pass a matrix-shaped input to the layer
        # and get a matrix-shaped output, thanks to broadcasting (described below)
        self.b = theano.shared(value=b_init.reshape(n_output, 1).astype(theano.config.floatX),
                               name='b',
                               borrow=True,
                               # Theano allows for broadcasting, similar to numpy.
                               # However, you need to explicitly denote which axes can be broadcasted.
                               # By setting broadcastable=(False, True), we are denoting that b
                               # can be broadcast (copied) along its second dimension in order to be
                               # added to another variable. For more information, see
                               # http://deeplearning.net/software/theano/library/tensor/basic.html
                               broadcastable=(False, True))
        self.activation = activation
        # We'll compute the gradient of the cost of the network with respect to the parameters in this list.
        self.params = [self.W, self.b]

    def output(self, x):
        '''
        Compute this layer's output given an input
        :parameters:
            - x : theano.tensor.var.TensorVariable
                Theano symbolic variable for layer input
        :returns:
            - output : theano.tensor.var.TensorVariable
                Mixed, biased, and activated x
        '''
        # Compute linear mix
        lin_output = T.dot(self.W, x) + self.b
        # Output is just linear mix if no activation function
        # Otherwise, apply the activation function
        return (lin_output if self.activation is None else self.activation(lin_output))
class MLP(object):
    """A multi-layer perceptron: the composition of a sequence of ``Layer`` objects."""

    def __init__(self, W_init, b_init, activations):
        '''
        Multi-layer perceptron class, computes the composition of a sequence of Layers
        :parameters:
            - W_init : list of np.ndarray, len=N
                Values to initialize the weight matrix in each layer to.
                The layer sizes will be inferred from the shape of each matrix in W_init
            - b_init : list of np.ndarray, len=N
                Values to initialize the bias vector in each layer to
            - activations : list of theano.tensor.elemwise.Elemwise, len=N
                Activation function for layer output for each layer
        '''
        # Make sure the input lists are all of the same length
        assert len(W_init) == len(b_init) == len(activations)
        # Initialize lists of layers
        self.layers = []
        # Construct the layers
        for W, b, activation in zip(W_init, b_init, activations):
            self.layers.append(Layer(W, b, activation))
        # Combine parameters from all layers
        self.params = []
        for layer in self.layers:
            self.params += layer.params

    def output(self, x):
        '''
        Compute the MLP's output given an input
        :parameters:
            - x : theano.tensor.var.TensorVariable
                Theano symbolic variable for network input
        :returns:
            - output : theano.tensor.var.TensorVariable
                x passed through the MLP
        '''
        # Recursively compute output: feed each layer's output into the next.
        for layer in self.layers:
            x = layer.output(x)
        return x

    def squared_error(self, x, y):
        '''
        Compute the squared euclidean error of the network output against the "true" output y
        :parameters:
            - x : theano.tensor.var.TensorVariable
                Theano symbolic variable for network input
            - y : theano.tensor.var.TensorVariable
                Theano symbolic variable for desired network output
        :returns:
            - error : theano.tensor.var.TensorVariable
                The squared Euclidian distance between the network output and y
        '''
        return T.sum((self.output(x) - y)**2)
def gradient_updates_momentum(cost, params, learning_rate, momentum):
    '''
    Compute updates for gradient descent with momentum
    :parameters:
        - cost : theano.tensor.var.TensorVariable
            Theano cost function to minimize
        - params : list of theano.tensor.var.TensorVariable
            Parameters to compute gradient against
        - learning_rate : float
            Gradient descent learning rate
        - momentum : float
            Momentum parameter, should be at least 0 (standard gradient descent) and less than 1
    :returns:
        updates : list
        List of updates, one for each parameter
    '''
    # Make sure momentum is a sane value
    assert momentum < 1 and momentum >= 0
    # List of update steps for each parameter
    updates = []
    # Just gradient descent on cost
    for param in params:
        # For each parameter, we'll create a param_update shared variable.
        # This variable will keep track of the parameter's update step across iterations.
        # We initialize it to 0
        param_update = theano.shared(param.get_value()*0., broadcastable=param.broadcastable)
        # Each parameter is updated by taking a step in the direction of the gradient.
        # However, we also "mix in" the previous step according to the given momentum value.
        # Note that when updating param_update, we are using its old value and also the new gradient step.
        # NOTE(review): Theano applies a function's updates simultaneously
        # from the *old* shared values, so `param` steps with the previous
        # iteration's `param_update`; the momentum-mixed value only takes
        # effect on the next call. Confirm this one-step lag is intended.
        updates.append((param, param - learning_rate*param_update))
        # Note that we don't need to derive backpropagation to compute updates - just use T.grad!
        updates.append((param_update, momentum*param_update + (1. - momentum)*T.grad(cost, param)))
    return updates
# Training data - two randomly-generated Gaussian-distributed clouds of points in 2d space
np.random.seed(0)
# Number of points
N = 1000
# Labels for each cluster
y = np.random.random_integers(0, 1, N)
# Mean of each cluster
means = np.array([[-1, 1], [-1, 1]])
# Covariance (in X and Y direction) of each cluster
covariances = np.random.random_sample((2, 2)) + 1
# Dimensions of each point
X = np.vstack([np.random.randn(N)*covariances[0, y] + means[0, y],
np.random.randn(N)*covariances[1, y] + means[1, y]])
# Plot the data
plt.figure(figsize=(8, 8))
plt.scatter(X[0, :], X[1, :], c=y, lw=.3, s=3, cmap=plt.cm.cool)
plt.axis([-6, 6, -6, 6])
plt.show()
# First, set the size of each layer (and the number of layers)
# Input layer size is training data dimensionality (2)
# Output size is just 1-d: class label - 0 or 1
# Finally, let the hidden layers be twice the size of the input.
# If we wanted more layers, we could just add another layer size to this list.
layer_sizes = [X.shape[0], X.shape[0]*2, 1]
# Set initial parameter values
W_init = []
b_init = []
activations = []
for n_input, n_output in zip(layer_sizes[:-1], layer_sizes[1:]):
# Getting the correct initialization matters a lot for non-toy problems.
# However, here we can just use the following initialization with success:
# Normally distribute initial weights
W_init.append(np.random.randn(n_output, n_input))
# Set initial biases to 1
b_init.append(np.ones(n_output))
# We'll use sigmoid activation for all layers
# Note that this doesn't make a ton of sense when using squared distance
# because the sigmoid function is bounded on [0, 1].
activations.append(T.nnet.sigmoid)
# Create an instance of the MLP class
mlp = MLP(W_init, b_init, activations)
# Create Theano variables for the MLP input
mlp_input = T.matrix('mlp_input')
# ... and the desired output
mlp_target = T.vector('mlp_target')
# Learning rate and momentum hyperparameter values
# Again, for non-toy problems these values can make a big difference
# as to whether the network (quickly) converges on a good local minimum.
learning_rate = 0.01
momentum = 0.9
# Create a function for computing the cost of the network given an input
cost = mlp.squared_error(mlp_input, mlp_target)
# Create a theano function for training the network
train = theano.function([mlp_input, mlp_target], cost,
updates=gradient_updates_momentum(cost, mlp.params, learning_rate, momentum))
# Create a theano function for computing the MLP's output given some input
mlp_output = theano.function([mlp_input], mlp.output(mlp_input))
# Keep track of the number of training iterations performed
iteration = 0
# We'll only train the network with 20 iterations.
# A more common technique is to use a hold-out validation set.
# When the validation error starts to increase, the network is overfitting,
# so we stop training the net. This is called "early stopping", which we won't do here.
max_iteration = 20
while iteration < max_iteration:
# Train the network using the entire training set.
# With large datasets, it's much more common to use stochastic or mini-batch gradient descent
# where only a subset (or a single point) of the training set is used at each iteration.
# This can also help the network to avoid local minima.
current_cost = train(X, y)
# Get the current network output for all points in the training set
current_output = mlp_output(X)
# We can compute the accuracy by thresholding the output
# and computing the proportion of points whose class match the ground truth class.
accuracy = np.mean((current_output > .5) == y)
# Plot network output after this iteration
plt.figure(figsize=(8, 8))
plt.scatter(X[0, :], X[1, :], c=current_output,
lw=.3, s=3, cmap=plt.cm.cool, vmin=0, vmax=1)
plt.axis([-6, 6, -6, 6])
plt.title('Cost: {:.3f}, Accuracy: {:.3f}'.format(float(current_cost), accuracy))
plt.show()
iteration += 1
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basics
Step2: Functions
Step3: theano.tensor
Step4: Shared variables
Step5: updates
Step6: Gradients
Step7: Debugging
Step8: The above error message is a little opaque (and it would be even worse had we not given the Theano variables A and B names). Errors like this can be particularly confusing when the Theano expression being computed is very complex. They also won't ever tell you the line number in your Python code where A dot B was computed, because the actual code being run is not your Python code-it's the compiled Theano code! Fortunately, "test values" let us get around this issue. N.b. - Not all theano methods (for example, and significantly, scan) allow for test values
Step9: Another place where debugging is useful is when an invalid calculation is done, e.g. one which results in nan. By default, Theano will silently allow these nan values to be computed and used, but this silence can be catastrophic to the rest of your Theano computation. At the cost of speed, we can instead have Theano compile functions in DebugMode, where an invalid computation causes an error
Step10: Using the CPU vs GPU
Step11: Example
Step12: MLP class
Step13: Gradient descent
Step14: Toy example
|
7,686
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import numpy as np

# Start from a clean graph so repeated notebook runs don't accumulate ops.
tf.reset_default_graph()

# Define our Dataset: the truth table of logical AND (label 1 only for [1,1]).
X = np.array([[0,0],[0,1],[1,0],[1,1]])
Y = np.array([0,0,0,1]).reshape(-1,1)

# Define the tensorflow tensors
x = tf.placeholder(tf.float32, [None, 2], name='X') # inputs
y = tf.placeholder(tf.float32, [None, 1], name='Y') # outputs

# Trainable parameters: a 2 -> 1 linear map plus bias, initialized to zeros.
W = tf.Variable(tf.zeros([2, 1]), name='W')
b = tf.Variable(tf.zeros([1,]), name='b')

# Define the model: logistic regression (sigmoid of an affine map).
pred = tf.nn.sigmoid(tf.matmul(x, W) + b) # Model

# Define the loss: binary cross-entropy averaged over the batch.
with tf.name_scope("loss"):
    loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred) + (1-y) * tf.log(1-pred), reduction_indices=1))

# Define the optimizer method you want to use (plain SGD, learning rate 0.1).
with tf.name_scope("optimizer"):
    optimizer = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# Include some Tensorboard visualization
writer_train = tf.summary.FileWriter("./my_model/")

# Start training session: full-batch gradient descent for 1000 epochs.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer_train.add_graph(sess.graph)
    for epoch in range(1000):
        _, c, p = sess.run([optimizer, loss, pred], feed_dict={x: X,
                                                               y: Y})
    # Show the final predictions next to the label placeholder (Python 2 print).
    print p, y
### Code here
### Code here
### code here
### code here
### Code here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To visualize the graph you just created, launch TensorBoard.
Step2: Print the weights of your model
Step3: Build a CNN to predict the MNIST digits
Step4: Print the weights of your model
Step5: Chose one (tell me what you chose...)
|
7,687
|
<ASSISTANT_TASK:>
Python Code:
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
from mne import read_evokeds
from mne.datasets import sample

print(__doc__)

# Path to the MNE "sample" dataset (downloaded on first use).
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'

# Reading: load the averaged (evoked) response for one condition,
# baseline-corrected over the interval up to t=0 and with SSP
# projections applied.
condition = 'Left Auditory'
evoked = read_evokeds(fname, condition=condition, baseline=(None, 0),
                      proj=True)

# Butterfly plot: all channels overlaid against time (in seconds).
evoked.plot(exclude=[], time_unit='s')

# Show result as a 2D image (x: time, y: channels, color: amplitude)
evoked.plot_image(exclude=[], time_unit='s')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Show result as a butterfly plot
|
7,688
|
<ASSISTANT_TASK:>
Python Code:
# Tensorflow
import tensorflow as tf
print('Tested with TensorFLow 1.2.0')
print('Your TensorFlow version:', tf.__version__)
# Feeding function for enqueue data
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
# Rnn common functions
from tensorflow.contrib.learn.python.learn.estimators import rnn_common
# Model builder
from tensorflow.python.estimator import model_fn as model_fn_lib
# Run an experiment
from tensorflow.contrib.learn.python.learn import learn_runner
# Helpers for data processing
import pandas as pd
import numpy as np
import argparse
import random
# data from: http://ai.stanford.edu/~amaas/data/sentiment/
TRAIN_INPUT = 'data/train.csv'
TEST_INPUT = 'data/test.csv'
# data manually generated
MY_TEST_INPUT = 'data/mytest.csv'
# wordtovec
# https://nlp.stanford.edu/projects/glove/
# the matrix will contain 400,000 word vectors, each with a dimensionality of 50.
word_list = np.load('word_list.npy')
word_list = word_list.tolist() # originally loaded as numpy array
word_list = [word.decode('UTF-8') for word in word_list] # encode words as UTF-8
print('Loaded the word list, length:', len(word_list))
word_vector = np.load('word_vector.npy')
print ('Loaded the word vector, shape:', word_vector.shape)
baseball_index = word_list.index('baseball')
print('Example: baseball')
print(word_vector[baseball_index])
max_seq_length = 10 # maximum length of sentence
num_dims = 50 # dimensions for each word vector
first_sentence = np.zeros((max_seq_length), dtype='int32')
first_sentence[0] = word_list.index("i")
first_sentence[1] = word_list.index("thought")
first_sentence[2] = word_list.index("the")
first_sentence[3] = word_list.index("movie")
first_sentence[4] = word_list.index("was")
first_sentence[5] = word_list.index("incredible")
first_sentence[6] = word_list.index("and")
first_sentence[7] = word_list.index("inspiring")
# first_sentence[8] = 0
# first_sentence[9] = 0
print(first_sentence.shape)
print(first_sentence) # shows the row index for each word
with tf.Session() as sess:
print(tf.nn.embedding_lookup(word_vector, first_sentence).eval().shape)
from os import listdir
from os.path import isfile, join
positiveFiles = ['positiveReviews/' + f for f in listdir('positiveReviews/') if isfile(join('positiveReviews/', f))]
negativeFiles = ['negativeReviews/' + f for f in listdir('negativeReviews/') if isfile(join('negativeReviews/', f))]
numWords = []
for pf in positiveFiles:
with open(pf, "r", encoding='utf-8') as f:
line=f.readline()
counter = len(line.split())
numWords.append(counter)
print('Positive files finished')
for nf in negativeFiles:
with open(nf, "r", encoding='utf-8') as f:
line=f.readline()
counter = len(line.split())
numWords.append(counter)
print('Negative files finished')
numFiles = len(numWords)
print('The total number of files is', numFiles)
print('The total number of words in the files is', sum(numWords))
print('The average number of words in the files is', sum(numWords)/len(numWords))
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(numWords, 50)
plt.xlabel('Sequence Length')
plt.ylabel('Frequency')
plt.axis([0, 1200, 0, 8000])
plt.show()
max_seq_len = 250
ids_matrix = np.load('ids_matrix.npy').tolist()
# Parameters for training
STEPS = 100000
BATCH_SIZE = 64
# Parameters for data processing
REVIEW_KEY = 'review'
SEQUENCE_LENGTH_KEY = 'sequence_length'
POSITIVE_REVIEWS = 12500
# copying sequences
data_sequences = [np.asarray(v, dtype=np.int32) for v in ids_matrix]
# generating labels
data_labels = [[1, 0] if i < POSITIVE_REVIEWS else [0, 1] for i in range(len(ids_matrix))]
# also creating a length column, this will be used by the Dynamic RNN
# see more about it here: https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn
data_length = [max_seq_len for i in range(len(ids_matrix))]
data = list(zip(data_sequences, data_labels, data_length))
random.shuffle(data) # shuffle
data = np.asarray(data)
# separating train and test data
limit = int(len(data) * 0.9)
train_data = data[:limit]
test_data = data[limit:]
LABEL_INDEX = 1
def _number_of_pos_labels(df):
pos_labels = 0
for value in df:
if value[LABEL_INDEX] == [1, 0]:
pos_labels += 1
return pos_labels
pos_labels_train = _number_of_pos_labels(train_data)
total_labels_train = len(train_data)
pos_labels_test = _number_of_pos_labels(test_data)
total_labels_test = len(test_data)
print('Total number of positive labels:', pos_labels_train + pos_labels_test)
print('Proportion of positive labels on the Train data:', pos_labels_train/total_labels_train)
print('Proportion of positive labels on the Test data:', pos_labels_test/total_labels_test)
def get_input_fn(df, batch_size, num_epochs=1, shuffle=True):
    """Build an Estimator ``input_fn`` over the preprocessed review data.

    :parameters:
        - df : np.ndarray of rows (sequence, one-hot label, length)
        - batch_size : int, examples per batch
        - num_epochs : int or None, passes over the data (None = repeat forever)
        - shuffle : bool, whether to shuffle examples (disable for the
          "manual" test so order is preserved)
    :returns:
        - input_fn : zero-argument callable returning (features, label)
          tensors, where features holds REVIEW_KEY and SEQUENCE_LENGTH_KEY.
    """
    def input_fn():
        # https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/data
        sequences = np.asarray([v for v in df[:, 0]], dtype=np.int32)
        labels = np.asarray([v for v in df[:, 1]], dtype=np.int32)
        length = np.asarray(df[:, 2], dtype=np.int32)

        dataset = tf.contrib.data.Dataset.from_tensor_slices(
            (sequences, labels, length))  # reading data from memory

        # Shuffle at the *example* level before batching. (Shuffling after
        # .batch(), as before, only permuted whole batches, so examples
        # within a batch always appeared together.)
        if shuffle:
            dataset = dataset.shuffle(buffer_size=100000)

        # Repeat for the requested number of epochs, then batch.
        dataset = dataset.repeat(num_epochs).batch(batch_size)

        # create iterator
        review, label, length = dataset.make_one_shot_iterator().get_next()

        features = {
            REVIEW_KEY: review,
            SEQUENCE_LENGTH_KEY: length,
        }

        return features, label
    return input_fn
features, label = get_input_fn(train_data, 2)()
with tf.Session() as sess:
items = sess.run(features)
print(items[REVIEW_KEY])
print
items = sess.run(features)
print(items[REVIEW_KEY])
print
train_input_fn = get_input_fn(train_data, BATCH_SIZE, None)
test_input_fn = get_input_fn(test_data, BATCH_SIZE)
def get_model_fn(rnn_cell_sizes,
                 label_dimension,
                 dnn_layer_sizes=[],
                 optimizer='SGD',
                 learning_rate=0.01,
                 embed_dim=128):
    """Build an Estimator model_fn for an LSTM-based sentence classifier.

    Args:
        rnn_cell_sizes: list of hidden sizes, one per stacked LSTM layer.
        label_dimension: number of output classes.
        dnn_layer_sizes: sizes of dense ReLU layers applied to the last RNN state.
        optimizer: optimizer name understood by tf.contrib.layers.optimize_loss.
        learning_rate: learning rate for the optimizer.
        embed_dim: unused -- embeddings come from the pre-trained `word_vector`
            table; kept only for backward compatibility of the signature.

    Returns:
        A model_fn(features, labels, mode) suitable for tf.estimator.Estimator.
    """
    def model_fn(features, labels, mode):
        review = features[REVIEW_KEY]
        sequence_length = tf.cast(features[SEQUENCE_LENGTH_KEY], tf.int32)
        # Map word ids to pre-trained embedding vectors.
        # (The original code also created a trainable tf.Variable of shape
        # [BATCH_SIZE, max_seq_len, 50] here and immediately rebound `data`,
        # leaving an unused variable in the graph; it has been removed.)
        data = tf.nn.embedding_lookup(word_vector, review)
        # Each RNN layer consists of a single LSTM cell.
        rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in rnn_cell_sizes]
        # Stack the layers into one multi-layer cell.
        multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
        # Run the RNN dynamically; see
        # https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn
        outputs, final_state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,
                                                 inputs=data,
                                                 dtype=tf.float32)
        # Keep only the output of the last valid timestep of each sequence.
        # (rnn_common is assumed imported elsewhere in this notebook.)
        last_activations = rnn_common.select_last_activations(outputs, sequence_length)
        # Dense ReLU layers on top of the last RNN state.
        for units in dnn_layer_sizes:
            last_activations = tf.layers.dense(
                last_activations, units, activation=tf.nn.relu)
        # Final dense layer producing the class logits.
        predictions = tf.layers.dense(last_activations, label_dimension)
        predictions_softmax = tf.nn.softmax(predictions)
        loss = None
        train_op = None
        # Kept for parity with the original code; not wired into EstimatorSpec.
        preds_op = {
            'prediction': predictions_softmax,
            'label': labels
        }
        eval_op = {
            "accuracy": tf.metrics.accuracy(
                tf.argmax(input=predictions_softmax, axis=1),
                tf.argmax(input=labels, axis=1))
        }
        if mode != tf.estimator.ModeKeys.PREDICT:
            # Cross-entropy takes the raw logits, not the softmax output.
            loss = tf.losses.softmax_cross_entropy(labels, predictions)
        if mode == tf.estimator.ModeKeys.TRAIN:
            train_op = tf.contrib.layers.optimize_loss(
                loss,
                tf.contrib.framework.get_global_step(),
                optimizer=optimizer,
                learning_rate=learning_rate)
        return model_fn_lib.EstimatorSpec(mode,
                                          predictions=predictions_softmax,
                                          loss=loss,
                                          train_op=train_op,
                                          eval_metric_ops=eval_op)
    return model_fn
# Instantiate the model_fn with the chosen hyperparameters.
model_fn = get_model_fn(rnn_cell_sizes=[64], # size of the hidden layers
                        label_dimension=2, # two classes: good/bad review
                        dnn_layer_sizes=[128, 64], # size of units in the dense layers on top of the RNN
                        optimizer='Adam',
                        learning_rate=0.001,
                        embed_dim=512) # NOTE(review): embed_dim is unused inside get_model_fn
# create experiment
def generate_experiment_fn():
    """Create an experiment function given hyperparameters.

    Returns:
        A function (run_config, hparams) -> Experiment used by learn_runner
        to create an Experiment, which executes model code provided in the
        form of an Estimator together with the train/eval input functions.

    Note:
        The original docstring text was left unquoted (a SyntaxError); it is
        restored here as a proper docstring with the actual inner signature.
    """
    def _experiment_fn(run_config, hparams):
        estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
        return tf.contrib.learn.Experiment(
            estimator,
            train_input_fn=train_input_fn,
            eval_input_fn=test_input_fn,
            train_steps=STEPS
        )
    return _experiment_fn
# run experiment: trains and evaluates the Estimator, writing checkpoints and summaries to ./testing2
learn_runner.run(generate_experiment_fn(), run_config=tf.contrib.learn.RunConfig(model_dir='testing2'))
def generate_data_row(sentence, label):
    """Turn a tokenised sentence into a (padded word-id sequence, label, length) row.

    NOTE(review): reads the globals `max_seq_length` and `word_list`; the rest
    of this notebook uses the name `max_seq_len` -- confirm which is defined.
    `word_list.index(word)` raises ValueError for out-of-vocabulary words, and
    a sentence longer than the maximum length raises IndexError.
    """
    length = max_seq_length
    sequence = np.zeros((length), dtype='int32')  # zero-padding for short sentences
    for i, word in enumerate(sentence):
        sequence[i] = word_list.index(word)
    return sequence, label, length
data_sequences = [np.asarray(v, dtype=np.int32) for v in ids_matrix]
# generating labels
data_labels = [[1, 0] if i < POSITIVE_REVIEWS else [0, 1] for i in range(len(ids_matrix))]
# also creating a length column, this will be used by the Dynamic RNN
# see more about it here: https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn
data_length = [max_seq_len for i in range(len(ids_matrix))]
first_sentence[0] = word_list.index("i")
first_sentence[1] = word_list.index("thought")
first_sentence[2] = word_list.index("the")
first_sentence[3] = word_list.index("movie")
first_sentence[4] = word_list.index("was")
first_sentence[5] = word_list.index("incredible")
first_sentence[6] = word_list.index("and")
first_sentence[7] = word_list.index("inspiring")
# first_sentence[8] = 0
# first_sentence[9] = 0
print(first_sentence.shape)
print(first_sentence) # shows the row index for each word
preds = estimator.predict(input_fn=my_test_input_fn, as_iterable=True)
sentences = _get_csv_column(MY_TEST_INPUT, 'review')
print()
for p, s in zip(preds, sentences):
print('sentence:', s)
print('bad review:', p[0], 'good review:', p[1])
print('-' * 10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading Data
Step2: We can also search our word list for a word like "baseball", and then access its corresponding vector through the embedding matrix.
Step3: Now that we have our vectors, our first step is taking an input sentence and then constructing its vector representation. Let's say that we have the input sentence "I thought the movie was incredible and inspiring". In order to get the word vectors, we can use Tensorflow's embedding lookup function. This function takes in two arguments, one for the embedding matrix (the wordVectors matrix in our case), and one for the ids of each of the words. The ids vector can be thought of as the integerized representation of the training set. This is basically just the row index of each of the words. Let's look at a quick example to make this concrete.
Step4: TODO### Insert image
Step5: Before creating the ids matrix for the whole training set, let’s first take some time to visualize the type of data that we have. This will help us determine the best value for setting our maximum sequence length. In the previous example, we used a max length of 10, but this value is largely dependent on the inputs you have.
Step6: We can also use the Matplot library to visualize this data in a histogram format.
Step7: From the histogram as well as the average number of words per file, we can safely say that most reviews will fall under 250 words, which is the max sequence length value we will set.
Step8: Data
Step9: Parameters
Step10: Separating train and test data
Step11: Then, let's shuffle the data and use 90% of the reviews for training and the other 10% for testing.
Step12: Verifying if the train and test data have enough positive and negative examples
Step13: Input functions
Step14: Creating the Estimator model
Step16: Create and Run Experiment
Step17: Making Predictions
|
7,689
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib
#matplotlib.use('nbagg')
#%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(10,3))
ax.set_ylim([-0.1,1.1])
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_title('Universe')
ax.plot([-1,1],[0,0],c='k')
plt.show()
fig, ax = plt.subplots(figsize=(10,3))
ax.set_ylim([-0.1,1.1])
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_title('Universe')
ax.plot([-1,1],[0,0],c='k')
ax.plot([0,0],[0,1],c='r')
plt.show()
from ipywidgets import interactive
def u(i=0):
    """Redraw the 'universe' figure with a single event at the centre.

    The slider value *i* is accepted (for ipywidgets.interactive) but ignored,
    so the picture is identical at every time step.
    """
    fig, ax = plt.subplots(figsize=(10,3))
    ax.set_ylim([-0.1,1.1])
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_title('Universe')
    ax.plot([-1,1],[0,0],c='k')  # the 1-D universe
    ax.plot([0,0],[0,1],c='r')   # the event at the origin
    plt.show()
interactive_plot = interactive(u, i=(0, 9999))
output = interactive_plot.children[-1]
interactive_plot
import sys
sys.path.append("../")
from Brain import GMM
p = GMM([0.5,0.5], np.array([[-1,.2], [1,.2]]))
t = np.linspace(-2,2,1000)
p_y = p(t)
fig, ax = plt.subplots(figsize=(10,3))
ax.plot(t,p_y)
plt.show()
samples, labels = p.sample(1000)
mu_ml = samples.mean()
lambda_ml = 1/samples.var()
q = GMM([1], np.array([[mu_ml,1/lambda_ml]]))
q_y = q(t)
fig, ax = plt.subplots(figsize=(10,3))
ax.plot(t,p_y)
ax.plot(t,q_y,ls='dashed')
m = [1]
l = [5]
t = np.linspace(-2,2,1000)
def call_back(i=0):
    """One online-learning step: update (mu, lamda) from one random sample and replot.

    Robbins-Monro-style updates with decaying step size c = 1/(i+1):
        mu    <- mu + c*(x - mu)
        lamda <- lamda + c*((x - mu)^2 - lamda)
    NOTE(review): the lamda recursion tracks E[(x-mu)^2] (a variance), yet
    1/lamda is passed to GMM as if lamda were a precision -- confirm the
    intended parameterisation against the GMM class.
    """
    global m, l  # histories of the running estimates
    mu = m[-1]
    lamda = l[-1]
    q = GMM([1], np.array([[mu,1/lamda]]))  # current single-component estimate
    q_y = q(t)
    fig, ax = plt.subplots(3,1,figsize=(9,5))
    ax[0].plot(t,p_y)             # true density
    ax[0].plot(t,q_y,ls='dashed') # estimated density
    c = 1/(i+1)                   # decaying learning rate
    x = samples[np.random.randint(0,1000)]  # draw one stored sample at random
    l.append(lamda + c*((x-mu)*(x-mu)-lamda))
    m.append(mu + c*(x-mu))
    ax[1].plot(m,label="$\mu$"); ax[1].legend()
    ax[2].plot(l,label="$\lambda$"); ax[2].legend()
    plt.show()
interactive_plot = interactive(call_back, i=(0, 999))
output = interactive_plot.children[-1]
interactive_plot
t = np.linspace(0,2,100)
q = GMM([1], np.array([[1,1/5]]))
q_y = q(t)
fig, ax = plt.subplots(figsize=(10,3))
ax.plot(t,q_y)
plt.show()
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
def f(mu, lamda, x):
    """Gaussian likelihood of observations *x* as a function of mean *mu* and
    precision *lamda* (either may be a numpy grid for surface plots).

    Computes prod_i N(x_i | mu, 1/lamda) through the exponentiated
    log-likelihood, which broadcasts over (mu, lamda) grids.
    """
    obs = np.array(x)
    N = obs.size
    c = lamda / 2
    sq_err = sum((xi - mu) ** 2 for xi in obs)
    return np.exp((N / 2) * np.log(c / np.pi) - c * sq_err)
mu_min = -1
mu_max = 3
mu_step = 0.05
l_min = 0.1
l_max = 5
l_step = 0.05
mu,lamda = np.mgrid[mu_min:mu_max:mu_step,l_min:l_max:l_step]
mf = mu.flatten()
lf = lamda.flatten()
locs = np.where(1/lf<(1-mf)**2)[0]
z = f(mu,lamda,[1])
fig = plt.figure(figsize=(8,6))
ax = fig.gca(projection='3d')
ax.plot_surface(mu, lamda, z, rstride=5, cstride=5, alpha=0.3)
cset = ax.contour(mu, lamda, z, zdir='z', offset=0.04, cmap=cm.coolwarm)
cset = ax.contour(mu, lamda, z, zdir='x', offset=mu_min, cmap=cm.coolwarm)
cset = ax.contour(mu, lamda, z, zdir='y', offset=l_max, cmap=cm.coolwarm)
ax.set_xlabel('$\mu$')
ax.set_xlim(mu_min, mu_max)
ax.set_ylabel('$\lambda$')
ax.set_ylim(l_min, l_max)
ax.set_zlabel('$f$')
plt.show()
l_max = 50
l_step = 0.05
mu,lamda = np.mgrid[mu_min:mu_max:mu_step,l_min:l_max:l_step]
z = f(mu,lamda,[1])
fig = plt.figure(figsize=(8,6))
ax = fig.gca(projection='3d')
ax.plot_surface(mu, lamda, z, rstride=5, cstride=5, alpha=0.3)
cset = ax.contour(mu, lamda, z, zdir='z', offset=0, cmap=cm.coolwarm)
cset = ax.contour(mu, lamda, z, zdir='x', offset=mu_min, cmap=cm.coolwarm)
cset = ax.contour(mu, lamda, z, zdir='y', offset=l_max, cmap=cm.coolwarm)
ax.set_xlabel('$\mu$')
ax.set_xlim(mu_min, mu_max)
ax.set_ylabel('$\lambda$')
ax.set_ylim(l_min, l_max)
ax.set_zlabel('$f$')
plt.show()
l_max = 5000
l_step = 5
mu,lamda = np.mgrid[mu_min:mu_max:mu_step,l_min:l_max:l_step]
z = f(mu,lamda,[1])
fig = plt.figure(figsize=(8,6))
ax = fig.gca(projection='3d')
ax.plot_surface(mu, lamda, z, rstride=5, cstride=5, alpha=0.3)
cset = ax.contour(mu, lamda, z, zdir='z', offset=0, cmap=cm.coolwarm)
cset = ax.contour(mu, lamda, z, zdir='x', offset=mu_min, cmap=cm.coolwarm)
cset = ax.contour(mu, lamda, z, zdir='y', offset=l_max, cmap=cm.coolwarm)
ax.set_xlabel('$\mu$')
ax.set_xlim(mu_min, mu_max)
ax.set_ylabel('$\lambda$')
ax.set_ylim(l_min, l_max)
ax.set_zlabel('$f$')
plt.show()
l_max = 15
l_step = 0.1
mu,lamda = np.mgrid[mu_min:mu_max:mu_step,l_min:l_max:l_step]
z = f(mu,lamda,[1,2])
fig = plt.figure(figsize=(8,6))
ax = fig.gca(projection='3d')
ax.plot_surface(mu, lamda, z, rstride=5, cstride=5, alpha=0.3)
cset = ax.contour(mu, lamda, z, zdir='z', offset=0, cmap=cm.coolwarm)
cset = ax.contour(mu, lamda, z, zdir='x', offset=mu_min, cmap=cm.coolwarm)
cset = ax.contour(mu, lamda, z, zdir='y', offset=l_max, cmap=cm.coolwarm)
ax.set_xlabel('$\mu$')
ax.set_xlim(mu_min, mu_max)
ax.set_ylabel('$\lambda$')
ax.set_ylim(l_min, l_max)
ax.set_zlabel('$f$')
plt.show()
mu_min = -3
mu_max = 3
l_min = 0.1
l_max = 100
l_step = 0.3
mu,lamda = np.mgrid[mu_min:mu_max:mu_step,l_min:l_max:l_step]
p = GMM([1], np.array([[1,1./5]]))
#p = GMM([0.5,0.5], np.array([[-1,.2], [1,.2]]))
def call_back(i=1):
    """Plot the likelihood surface f(mu, lamda) for *i* fresh samples from p.

    Each slider move draws a brand-new batch of i samples, so the surface
    changes stochastically as well as with the sample count.
    """
    z = f(mu,lamda,p.sample(i)[0])
    fig = plt.figure(figsize=(8,6))
    ax = fig.gca(projection='3d')
    ax.plot_surface(mu, lamda, z, rstride=3, cstride=3, alpha=0.3)
    # Projected contours on each pane of the 3-D box.
    cset = ax.contour(mu, lamda, z, zdir='z', offset=0, cmap=cm.coolwarm)
    cset = ax.contour(mu, lamda, z, zdir='x', offset=mu_min, cmap=cm.coolwarm)
    cset = ax.contour(mu, lamda, z, zdir='y', offset=l_max, cmap=cm.coolwarm)
    ax.set_xlabel('$\mu$')
    ax.set_xlim(mu_min, mu_max)
    ax.set_ylabel('$\lambda$')
    ax.set_ylim(l_min, l_max)
    ax.set_zlabel('$f$')
    plt.show()
interactive_plot = interactive(call_back, i=(1, 1000))
output = interactive_plot.children[-1]
interactive_plot
p = GMM([1], np.array([[1,1./5]]))
m = []
l = []
def call_back(i=1):
    """Append the ML estimates (sample mean, 1/sample variance) for *i* fresh samples and plot their history."""
    global m, l
    x = p.sample(i)[0]
    m.append(x.mean())
    # ML precision; for i=1 x.var() is 0, so this divides by zero (numpy emits inf with a warning)
    l.append(1/x.var())
    fig, ax = plt.subplots(1,2,figsize=(10,3))
    ax[0].plot(m,label="$\mu$")
    ax[0].legend()
    ax[1].plot(l,label="$\lambda$")
    ax[1].legend()
    plt.show()
interactive_plot = interactive(call_back, i=(1, 1000))
output = interactive_plot.children[-1]
interactive_plot
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
def f(mu,lamda,x):
    """Gaussian likelihood of observations *x* as a function of mean *mu* and precision *lamda*.

    NOTE(review): byte-for-byte redefinition of the `f` defined earlier in this
    notebook; kept so this cell runs standalone.
    """
    x = np.array(x)
    N = x.size
    val = 0
    c = lamda/2                # half-precision appears in both the log-norm and the exponent
    for xi in x:
        val += (xi-mu)**2      # broadcasts over (mu, lamda) grids
    return np.exp((N/2)*np.log(c/np.pi) - c*val)
mu_min = -1
mu_max = 3
mu_step = 0.05
l_min = 0.1
l_max = 5
l_step = 0.05
mu,lamda = np.mgrid[mu_min:mu_max:mu_step,l_min:l_max:l_step]
mf = mu.flatten()
lf = lamda.flatten()
locs = np.where(1/lf<(1-mf)**2)[0]
z = f(mu,lamda,[1])
fig = plt.figure(figsize=(8,6))
ax = fig.gca(projection='3d')
ax.plot_surface(mu, lamda, z, rstride=5, cstride=5, alpha=0.3)
cset = ax.contour(mu, lamda, z, zdir='z', offset=0.04, cmap=cm.coolwarm)
cset = ax.contour(mu, lamda, z, zdir='x', offset=mu_min, cmap=cm.coolwarm)
#cset = ax.fill_between(mu[:,0], l_min, l_max, where=1/lamda[0]<(1-mu[:,0])**2, facecolor='green', interpolate=True)
cset = ax.scatter(mu.flatten()[locs], lamda.flatten()[locs], zdir='z', c='g')
cset = ax.contour(mu, lamda, z, zdir='y', offset=l_max, cmap=cm.coolwarm)
ax.set_xlabel('$\mu$')
ax.set_xlim(mu_min, mu_max)
ax.set_ylabel('$\lambda$')
ax.set_ylim(l_min, l_max)
ax.set_zlabel('$f$')
plt.show()
p = GMM([0.5,0.5], np.array([[-1,.2], [1,.2]]))
t = np.linspace(-2,2,1000)
p_y = p(t)
fig, ax = plt.subplots(figsize=(10,3))
ax.plot(t,p_y)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To begin testing our powers, let's make something happen. An event in the center of the universe may be exciting.
Step2: Actually, that wasn't very exciting. Our universe exists in one static moment with one event in the center. Let us create time and allow the event to repeat.
Step3: This is not much better. It would be more interesting if as time progressed the event ocurred in different locations. To implement this we could specify instructions that tell the event when and where to occur. However, knowing everything that will happen is also unexciting. We want to be entertained and surprised. We want to introduce some uncertainty. Before we can introduce uncertainty about where the event will occur next, we must first know all the possible locations where the event could occur. To be general we allow the event to occur anywhere in the one dimensional universe and we identify each location within it by $\omega \in \Omega$. The set of all possible locations $\Omega$ is called the sample space. Even with all possible locations where the event could occur, it is not clear how we compare different locations or how we choose one over another. Currently, all of our locations $\Omega$ are qualititaive. Our universe has no metric, measure, or units thus each location $\omega$ is just an idea, an abstraction. We must quantify these locations for them to become amenable to traditional analysis. To address this, we define a function $X(\omega)$ that maps every location $\omega \in \Omega$ to a real number $x \in \mathbb{R}$. This function is called a random variable. Random variables will become the conduit for us to express events and define probabilities. Unless necessary to explicitly define, the sample space will be specified implicitly through random variables.
Step4: Maximum Likelihood
Step5: Online Learning with Sequential Data
Step6: As expected the online approach converges heuristically to the same solution as the batch approach. This iterative scheme was derived by maximizing the log-likelihood function on a set of observations and then rewriting it recursively. However, we can instead maximize the log-likelihood function directly using a technique called gradient ascent.
Step7: The confusion arises when we apply the optimization definition directly, which says we must find the variable $x$ that maximizes the pdf. This is just a little mathematical sleight of hand resulting from an imprecise specification of exactly what function is being optimized. When we observe values of the random variable $x$ we view the function $q(x; \mu, \lambda)$ differently. The observed random variable becomes the parameter and $q$ becomes a function of both $\mu$ and $\lambda$. It is no longer required to be a pdf as the integral over both $\mu$ and $\lambda$ is not guaranteed to equal 1. This is made clear by defining a new function for when $x$ is observed.
Step8: We can visually inspect this surface to guide our intuition about what values of $\mu$ and $\lambda$ will maximize our objective function $f(\mu, \lambda; x=1)$ or the likelihood function. We see clearly the peak along the $\mu$ axis is when $\mu=1$. This coincides with when the gaussian pdf $q(x)$ is centered over the observed value of $x=1$. It is less clear which value of $\lambda$ maximizes the general objective function. Within the domain where we have visualized the surface, it is along the boundary when $\lambda=5$. To guide our intuition further we look at $f(\mu,\lambda)$ for increasing values of $\lambda = [50,5000]$.
Step9: It appears as though the value of the objective function increases without bound along the lambda axis. This turns out to be true and can be seen explicitly by combining equations (?)(?).
Step10: How does the surface change as we increase the number of observations?
Step11: The mean and variance appear to be sensitive to the number of observations. Lets examine their convergence as a function of observation count
Step12: Our aim is to find the values of $\mu$, and $\lambda$ that make the objective function $f(\mu, \lambda; \mathrm{X})$ as large as possible. To ease notation we will drop the dependence on $\mathrm{X}$ and consider the multiple parameters as a vector $\boldsymbol{\theta} = (\mu, \lambda)^\top$. Our objective function becomes $f(\boldsymbol{\theta})$. If a particular location in the parameter space $\boldsymbol{\theta}_0$ is an extrema then the gradient of the objective function at that location will be the zero vector. If this condition is met, we call $\boldsymbol{\theta}_0$ a maximizer and usually give it significance with special notation $\boldsymbol{\theta}_0 = \boldsymbol{\theta}^*$.
Step13: Learning and Optimization
|
7,690
|
<ASSISTANT_TASK:>
Python Code:
# initialize environment
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import sympy as sy
x = np.linspace(0.0,10.0,1000)
dx = x[1]-x[0]
def numDeriv(x, f):
    """Midpoint finite-difference derivative of sampled f(x).

    Returns (x_mid, slope), each with one fewer element than the inputs.
    """
    x_mid = 0.5 * (x[:-1] + x[1:])
    slope = (f[1:] - f[:-1]) / (x[1:] - x[:-1])
    return x_mid, slope
def func1(r):
    """Inverted-parabola test potential E(r) = 1 - r^2; returns (E, dE/dr)."""
    energy = 1 - r ** 2
    slope = -2 * r  # d(1 - r^2)/dr
    return energy, slope
def func2(r):
    """Double-well test potential E(r) = (1 - r^2)^2; returns (E, dE/dr).

    Fix: dE/dr = 2*(1-r^2)*(-2r) = -4r*(1-r^2). The original returned
    -2r*(1-r^2) -- it dropped the chain-rule factor d(r^2)/dr = 2r -- which
    was inconsistent with func1 and func3..func6 (all of which return the
    correct dE/dr under the same convention).
    """
    r2 = r**2
    E = (1-r2)**2
    fr = -4*(1-r2)
    return E, fr*r
def func3(r):
    """Two-zero polynomial potential E(r) = 0.1*(1-r^2)*(4-r^2); returns (E, dE/dr)."""
    a = 1 - r ** 2
    b = 4 - r ** 2
    energy = 0.1 * (a * b)
    slope = 0.1 * (-2 * a + -2 * b)  # product rule in r^2, times d(r^2)/dr folded into *r below
    return energy, slope * r
def func4(r):
    """Polynomial potential E(r) = 0.1*(1-r^2)*(4-r^2)^2; returns (E, dE/dr)."""
    a = 1 - r ** 2
    b = 4 - r ** 2
    energy = 0.1 * (a * b ** 2)
    slope = 0.1 * (-4 * a * b + -2 * b ** 2)
    return energy, slope * r
def func5(r):
    """Polynomial potential E(r) = 0.05*(1-r^2)*(4-r^2)^3; returns (E, dE/dr)."""
    a = 1 - r ** 2
    b = 4 - r ** 2
    energy = 0.05 * (a * b ** 3)
    slope = 0.05 * (-6 * a * b ** 2 + -2 * b ** 3)
    return energy, slope * r
def func6(r):
    """Polynomial potential E(r) = 0.025*(1-r^2)*(4-r^2)^4; returns (E, dE/dr)."""
    a = 1 - r ** 2
    b = 4 - r ** 2
    energy = 0.025 * (a * b ** 4)
    slope = 0.025 * (-8 * a * b ** 3 + -2 * b ** 4)
    return energy, slope * r
funcs = [func1,func2,func3,func4,func5,func6]
for func in funcs:
E,F = func(x)
plt.subplot(2,1,1); plt.plot(x,E);
plt.subplot(2,1,2); plt.plot(x,-F);
plt.subplot(2,1,1); plt.ylim(-1.0,1.0); plt.xlim(0.0,4.0); plt.grid(); plt.ylabel("Energy"); plt.axhline(0,c='k',ls='--')
plt.subplot(2,1,2); plt.ylim(-4.0,4.0); plt.xlim(0.0,4.0); plt.grid(); plt.ylabel("Force"); plt.axhline(0,c='k',ls='--')
a = (1.0-x**2) # this per atom
b = (x-2)**2 # this will be on grid
plt.plot(x,a,label='a')
plt.plot(x,b,label='b')
plt.plot(x,a*b,lw=2,c='k',label='c')
vmax=1.00; plt.ylim(-vmax,vmax); plt.xlim(0.0,4.0); plt.grid(); plt.legend()
# Default Morse decay constant (negative sign; captured as getMorse's default below).
alpha = -1.6
def getMorse(r, R, eps, alpha=alpha):
    """Morse pair potential eps*(exp(2*a*d) - 2*exp(a*d)) with d = r - R.

    Equals -eps exactly at r = R regardless of the decay constant.
    """
    d = r - R
    return eps * (np.exp(2 * alpha * d) - 2 * np.exp(alpha * d))
def fastExp(x, n=4):
    """Approximate exp(x) via (1 + x/2^n)^(2^n), evaluated with n squarings.

    Larger n is more accurate; the approximation degrades for large |x| and
    oscillates for very negative x (a clamped variant is redefined later in
    this notebook).
    """
    approx = 1.0 + x / np.power(2.0, n)
    for _ in range(n):
        approx = approx * approx
    return approx
def getFastMorse( r, R, eps, alpha=alpha, n=4 ):
    """Morse potential evaluated with the fastExp approximation instead of np.exp.

    Uses exp(2*a*d) = exp(a*d)^2 so only one approximate exponential is needed.
    NOTE(review): the result depends on which `fastExp` is in scope -- a clamped
    variant is redefined later in this notebook.
    """
    expar = fastExp(alpha*(r-R), n=n )
    return eps*( expar*expar - 2*expar )
plt.plot( x, getMorse ( x, 4.0, 1.0 ), ':k', lw=2, label=('exact') )
for i in range(5):
plt.plot( x, getFastMorse( x, 4.0, 1.0, n=i ), ls='-', label=('aprox n=%i' %i ) )
vmax=1.00; plt.ylim(-vmax,vmax); plt.xlim(2.0,10.0); plt.grid(); plt.legend()
def Gaussian(r):
    """Unit-height Gaussian exp(-r^2) (no normalisation factor)."""
    return np.exp(-(r * r))
def fastGauss(r, n=4):
    """Approximate Gaussian exp(-r^2) as (1 - r^2/2^n)^(2^n) via n squarings.

    No clamping: once r^2 exceeds 2^n the base turns negative and the result
    diverges, so the approximation is only useful near the origin.
    """
    base = 1.0 - (r * r) / np.power(2.0, n)
    for _ in range(n):
        base = base * base
    return base
plt.plot( x , Gaussian( x), '--k', label=('exact') )
for i in range(5):
plt.plot( x, fastGauss( x, n=i ), ls='-', label=('aprox n=%i' %i ) )
plt.ylim(-0.5,1.0); plt.xlim(0.0,4.0); plt.grid(); plt.legend()
def fastExp(x, n=4):
    """Clamped approximation of exp(x): (1 + x/2^n)^(2^n) with the base floored at 0.

    Redefines the earlier fastExp; unlike it, a negative base is clamped to 0,
    so the approximation decays monotonically to 0 for large negative x
    instead of oscillating.

    Fix: the original clamped with in-place boolean assignment (e[e<0] = 0),
    which crashed on scalar inputs; np.where handles scalars and arrays alike
    and produces identical values for array inputs.
    """
    e = 1.0 + x / np.power(2.0, n)
    e = np.where(e < 0, 0.0, e)
    for i in range(n):
        e *= e
    return e
xs = np.linspace(0.0,10.0,300)
plt.plot( xs, np.exp(-xs), '--k', label=('exact') )
for i in range(5):
plt.plot( xs, fastExp(-xs, n=i ), ls='-', label=('aprox n=%i' %i ) )
plt.ylim(-0.5,1.0); plt.xlim(0.0,6.0); plt.grid(); plt.legend()
'''
V = (A-r2)*(B-r2)**4
fr = ( 4*(A-r2) - (B-r2) )*-2*(B-r2)**3
'''
r, r2, A, B, C = sy.symbols('r r2 A B C')
V = (A-r2)*(B-r2)**2
F = sy.diff(V, r2)
#F = sy.simplify(sy.expand(F))
F = sy.factor(F)
print F
F = F.expand()
print "coefs : "
#print sy.collect(F,r2)
print " ^0 : ",F.coeff(r2, 0)
print " ^1 : ",F.coeff(r2, 1)
print " ^2 : ",F.coeff(r2, 2)
print " ^3 : ",F.coeff(r2, 3)
print " ^4 : ",F.coeff(r2, 4)
print "solve : ", sy.solve(F,r2)
def UniPolyPot( r, Rmax=4.0, Rmin=2.0, n=4):
    '''
    potential in form $ V(r) = (A-r^2) * (B-r^2)^n $ , rescaled so V(Rmin) = -1.

    B = Rmax^2 fixes the outer zero; A = (C*(n+1) - B)/n (with C = Rmin^2)
    places the stationary point at r = Rmin, and resc normalises the depth
    there to -1. Returns (E, F) with F = fr*r, matching the sibling
    potential helpers. NOTE(review): the prints are Python-2 statements.
    '''
    r2 = r**2
    C = Rmin**2
    B = Rmax**2
    print "C=",C,"n=",n," C*(n+1)=",C*(n+1)," B=",B
    A = (C*(n+1) - B)/n; print "A =",A," R0 =", np.sqrt(A)
    resc = -1/((A-C)*(B-C)**n); print "resc = ", resc
    ea = A-r2
    eb = B-r2
    ebn = eb**(n-1) * resc   # shared factor (B-r^2)^(n-1), pre-scaled
    E = ea * eb * ebn
    fr = ( n*ea + eb) * ebn * 2
    return E, fr*r
def plotUniPolyPot( Rmax=4.0, Rmin=3.0, n=4, clr=None ):
    """Plot UniPolyPot energy (top pane) and force term (bottom pane) for one parameter set."""
    E,F = UniPolyPot(x, Rmax=Rmax, Rmin=Rmin, n=n )
    plt.subplot(2,1,1); plt.plot(x,E, c=clr, label=("%i" %n));
    plt.subplot(2,1,2); plt.plot(x,F, c=clr, label=("%i" %n));
    # dotted line: finite-difference -dE/dx as a numerical check against F
    plt.plot((x[:-1]+x[1:])*0.5,(E[:-1]-E[1:])/dx, ls=":",c=clr, label=("%i" %n));
    plt.axvline(Rmax,c=clr, ls="--")
plotUniPolyPot( Rmax=4.0, Rmin=3.0, n=2, clr='r' )
plotUniPolyPot( Rmax=4.4, Rmin=3.0, n=3, clr='g' )
plotUniPolyPot( Rmax=4.8, Rmin=3.0, n=4, clr='b' )
plt.subplot(2,1,1); plt.ylim(-1.0,1.0); plt.xlim(0.0,6.0); plt.grid(); plt.ylabel("Energy"); plt.axhline(0,c='k',ls='--')
plt.subplot(2,1,2); plt.ylim(-4.0,4.0); plt.xlim(0.0,6.0); plt.grid(); plt.ylabel("Force"); plt.axhline(0,c='k',ls='--')
def R4(r, REmin=2.0, Emin=-0.2, E0=1.0):
    """Truncated quartic well f = S*(REmin^2 - r^2)^2 + Emin, zeroed beyond Rmax.

    S is chosen so f(0) = E0; the minimum Emin sits at r = REmin, with roots
    at R0 (inner) and Rmax (outer). Returns (f, df, ddf, Rmax, R0, REmin)
    where df = -f'(r) and ddf = d(df)/dr (before masking).
    *r* must be a numpy array (boolean-mask assignment).
    """
    well2 = REmin ** 2
    S = (E0 - Emin) / well2 ** 2
    half_width = np.sqrt(-(Emin / S))   # distance from REmin^2 to each root, in r^2
    Rmax = np.sqrt(well2 + half_width)
    R0 = np.sqrt(well2 - half_width)
    r2 = r ** 2
    gap = well2 - r2
    f = S * gap ** 2 + Emin
    df = S * (4 * gap * r)
    ddf = S * (4 * well2 - 12 * r2)
    outside = r > Rmax
    f[outside] = 0; df[outside] = 0; ddf[outside] = 0
    return f, df, ddf, Rmax, R0, REmin
def R4_(r, R0=1.0, Rmax=2.0, Emin=-0.2 ):
    """Same truncated quartic well as R4, but parameterised by its roots R0 and Rmax.

    The minimum sits at REmin^2 = (Rmax^2 + R0^2)/2 with depth Emin (Scaling
    fixes the depth); f vanishes at both roots and is zeroed together with
    its derivatives for r > Rmax. Returns (f, df, ddf, Rmax, R0, REmin);
    *r* must be a numpy array (boolean-mask assignment).
    """
    Rmax2=Rmax**2; R02=R0**2
    REmin2 = (Rmax2 + R02)*0.5
    Scaling = -4*Emin/(Rmax2-R02)**2   # sets f(REmin) = Emin and f(R0) = f(Rmax) = 0
    mask=(r>Rmax)
    r2=r**2
    f = Scaling*( (REmin2-r2)**2 ) + Emin
    df = Scaling*( 4*(REmin2-r2)*r )   # -df/dr (force-like sign)
    ddf = Scaling*( 4*REmin2-12*r2 )
    f[mask]=0; df[mask]=0; ddf[mask]=0;
    return f,df,ddf, Rmax,R0,np.sqrt(REmin2)
def LR2(r, C=-2.5, K=1.0, A=1.0, s=0.1):
    """Harmonic-plus-softened-inverse potential f = K*r^2 + A/(s + r^2) + C.

    Returns (f, df, ddf, 0, 0, 0) to match the R4/R4_ tuple shape;
    df = -f'(r) and ddf = -f''(r).
    """
    r2 = r ** 2
    soft = s + r2
    f = K * r2 + A / soft + C
    df = -(2 * K * r - 2 * A * r / soft ** 2)
    ddf = -(2 * K - 2 * A / soft ** 2 + 8 * A * r2 / soft ** 3)
    return f, df, ddf, 0, 0, 0
rs = np.linspace(0,4.0,100)
#func = R4
func = R4_
#func = LR2
f,df,ddf, Rmax,R0,REmin = func(rs)
dr = rs[1]-rs[0]
df_ = -(f[2:]-f[:-2])/(2*dr)
ddf_ = (df_[2:]-df_[:-2])/(2*dr)
plt.figure(figsize=(5,15))
plt.subplot(3,1,1); plt.plot(rs,f) ; plt.axhline(0,ls='--',color='k'); plt.axvline(R0,ls='--',color='k'); plt.axvline(REmin,ls='--',color='k'); plt.axvline(Rmax,ls='--',color='k'); # plt.ylim(-1,1);
plt.subplot(3,1,2); plt.plot(rs,df) ; plt.axhline(0,ls='--',color='k'); plt.axvline(REmin,ls='--',color='k'); plt.axvline(Rmax,ls='--',color='k'); plt.plot(rs[1:-1],df_); # plt.ylim(-5,5);
plt.subplot(3,1,3); plt.plot(rs,ddf) ; plt.axhline(0,ls='--',color='k'); plt.axvline(Rmax,ls='--',color='k'); plt.plot(rs[2:-2],ddf_); # plt.ylim(-10,10);
plt.show()
'''
$ eps*( A/r^8 - 1/r^6) = eps*(R0^2-r^2)/(r^2)^4 $
$ A = R0^2 $
'''
r, r2, A, B, C = sy.symbols('r r2 A B C')
V = (A-r2)/r2**4 # A/r^8
F = sy.diff(V, r2)
#F = sy.simplify(sy.expand(F))
F = sy.factor(F)
print F
print "solve : ", sy.solve(F,r2)
def PolyInvR2( r, Rmin=3.0):
    '''
    Lennard-Jones-like potential E = resc * (R0^2 - r^2) / r^8, i.e. the
    factorised form of A/r^8 - 1/r^6 derived in the sympy cell above.

    R0 = Rmin*sqrt(3/4) is the zero crossing (E = 0 at r = R0); resc
    normalises the value at Rmin to -1. Returns (E, F) with F = fr*r,
    matching the sibling potential helpers.
    NOTE(review): the prints are Python-2 statements.
    '''
    R0 = Rmin*np.sqrt(3.0/4.0) #*(3.0/4.0)
    print Rmin, R0
    r2 = r**2
    A = R0**2
    ir2 = 1.0/r2;
    resc = -1.0/( (1/Rmin**8)*( A-Rmin**2 ) ); print resc   # fixes E(Rmin) = -1
    E = resc*(ir2**4)*( A-r2 )
    fr = resc*(ir2**4)*(4*A*ir2-3)*2
    return E, fr*r
def LenardLones(r, Rmin):
    """Lennard-Jones 12-6 potential normalised to depth 1: E = s^12 - 2*s^6
    with s = Rmin/r, so E(Rmin) = -1.

    Returns (E, F) with F = fr*r, matching the sibling potential helpers.
    NOTE(review): the 2**(1/2.5) prefactor in the force term looks empirical
    (the original comment asked "WHY ?"); the analytic dE/dr prefactor is 12.
    """
    sigma2 = Rmin ** 2 / r ** 2
    sigma6 = sigma2 ** 3
    energy = (sigma6 - 2) * sigma6
    fr = (sigma6 - 1) * sigma6 * sigma2 * (2.0 ** (1.0 / 2.5))
    return energy, fr * r
def plotPolyInvR2( Rmin=3.0, clr=None ):
    """Overlay PolyInvR2 (solid) against the reference LenardLones (dashed) plus a numerical-derivative check.

    NOTE(review): `Rmax` here is a module-level global, not derived from Rmin --
    this line looks copy-pasted from plotUniPolyPot; confirm the intended marker.
    """
    E,F = PolyInvR2(x, Rmin=Rmin )
    E_,F_ = LenardLones( x, Rmin=Rmin )
    plt.subplot(2,1,1); plt.plot(x,E, c=clr); plt.plot(x,E_, c=clr, ls='--');
    plt.subplot(2,1,2); plt.plot(x,F, c=clr); plt.plot(x,F_, c=clr, ls='--');
    # dotted line: finite-difference -dE_/dx for comparison against F_
    plt.plot((x[:-1]+x[1:])*0.5,(E_[:-1]-E_[1:])/dx, ls=":", lw=2.0, c=clr);
    plt.axvline(Rmax,c=clr, ls="--")
plotPolyInvR2( Rmin=3.0, clr='r' )
#plotPolyInvR2( , clr='g' )
#plotPolyInvR2( , clr='b' )
plt.subplot(2,1,1); plt.ylim(-1.0,1.0); plt.xlim(0.0,6.0); plt.grid(); plt.ylabel("Energy"); plt.axhline(0,c='k',ls='--')
plt.subplot(2,1,2); plt.ylim(-1.0,1.0); plt.xlim(0.0,6.0); plt.grid(); plt.ylabel("Force"); plt.axhline(0,c='k',ls='--')
def getBuckingham( r, R0=3.4, eps=0.030, alpha=1.8 ):
    '''
    Buckingham (exp-6) potential with a stationary point V(R0) = -eps.

    V = eps *( (6/(a-6)) * exp( a * (1-(R/R0) ) - (a/(a-6)) *(R0/R)**6 )
    V = (eps/(a-6)) *( 6*exp( a * (1-(R/R0) ) - a*(R0/R)**6 )
    V = (eps/(a-6)) *( 6*exp( -(a/R0)*(R0-R) ) - a*(R0/R)**6 )

    At r = R0 the two terms combine to eps*(6-a)/(a-6) = -eps.
    NOTE(review): the prints are Python-2 statements.
    '''
    a = alpha*R0
    pref = eps/(a-6)
    A = pref * 6        # amplitude of the exponential repulsion term
    #B = pref * a * (R0**6)
    B = pref * a        # dispersion amplitude; the (R0/r)^6 form carries the R0^6 factor
    print R0, eps, alpha, " | ", a, pref, " | ", A, B
    #V = A*np.exp( -alpha*(r-R0) ) + B/(r**6)
    V = A*np.exp( -alpha*(r-R0) ) - B*(R0/r)**6
    return V
eps = 0.03
R0 = 3.4
alpha=1.8
V = getBuckingham( x, R0=R0, eps=eps, alpha=alpha ); #print V
x_,F = numDeriv( x, V )
plt.subplot(2,1,1); plt.plot(x, V )
plt.subplot(2,1,2); plt.plot(x_, F )
plt.subplot(2,1,1); plt.ylim(-0.1,0.1); plt.xlim(0.0,6.0); plt.grid(); plt.ylabel("Energy"); plt.axhline(0,c='k',ls='--');
plt.axvline( R0,c='k',ls='-'); plt.axhline( -eps,c='k',ls='-')
plt.subplot(2,1,2); plt.ylim(-0.1,0.1); plt.xlim(0.0,6.0); plt.grid(); plt.ylabel("Force"); plt.axhline(0,c='k',ls='--')
# Default Morse decay constant (re-assigned here; captured as the default below).
alpha = -1.6
def getMorse( r, R, eps, alpha=alpha ):
    """Morse pair potential eps*(exp(2a(r-R)) - 2*exp(a(r-R))); equals -eps at r = R.

    NOTE(review): identical redefinition of the getMorse/alpha pair from earlier
    in this notebook, kept so this cell runs standalone.
    """
    return eps*( np.exp(2*alpha*(r-R)) - 2*np.exp(alpha*(r-R)) )
def getElec(r, qq, w2=4):
    """Softened Coulomb interaction ke*qq/(w2 + r^2).

    ke = 14.3996448915 is the Coulomb prefactor (eV*Angstrom per e^2);
    w2 softens the 1/r^2 form so the energy stays finite at r = 0.
    """
    coulomb_const = 14.3996448915
    return coulomb_const * qq / (w2 + r * r)
Emorse = getMorse( x, 3.0, 0.095*0.026 )
Eelec = getElec ( x, -0.4*0.2 )
plt.plot( x, Emorse, '-r', lw=2, label=('Morse') )
plt.plot( x, Eelec, '-b', lw=2, label=('Elec') )
plt.plot( x, Emorse+Eelec , '-k', lw=2, label=('Morse+Elec') )
vmax=0.5; plt.ylim(-vmax,vmax); plt.xlim(0.0,10.0); plt.grid(); plt.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How to use $|\vec r|^2$ instead of $|\vec r|$ (get rid of sqrt())
Step2: Factorized Polynominals
Step3: Approx exponential
Step4: Approx Gaussian
Step5: Exp power
Step6: Radial polynomial potential
Step7: Even reciprocal polynominal
Step8: Buckingham potential
|
7,691
|
<ASSISTANT_TASK:>
Python Code:
# --- ES-DOC / pyesdoc scaffold (auto-generated): documents the CMIP6
# --- "atmoschem" realm of the MESSy-Consortium EMAC-2-53-AerChem model.
# --- Only the DOC.set_author / DOC.set_value placeholders below are meant
# --- to be filled in by hand; "DO NOT EDIT" lines wire the notebook to the
# --- ES-DOC archive and must stay as generated.
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'messy-consortium', 'emac-2-53-aerchem', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
7,692
|
<ASSISTANT_TASK:>
Python Code:
import pylearn2.utils
import pylearn2.config
import theano
import neukrill_net.dense_dataset
import neukrill_net.utils
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import holoviews as hl
%load_ext holoviews.ipython
import sklearn.metrics
cd ..
m = pylearn2.utils.serial.load("/disk/scratch/neuroglycerin/models/alexnet_extra_layer_dropouts2.pkl.recent")
%run check_test_score.py -v run_settings/alexnet_extra_layer_dropouts.json
%run check_test_score.py -v run_settings/alexnet_extra_layer_dropouts2.json
%run check_test_score.py -v run_settings/alexnet_extra_layer_dropouts3.json
def plot_monitor(model, c='valid_y_nll'):
    """Plot one monitoring channel of a pylearn2 model: the channel's
    recorded values against the number of training examples seen.

    model : pylearn2 model with a populated `monitor.channels` dict
    c     : channel name to plot (default: validation NLL)
    """
    ch = model.monitor.channels[c]
    plt.title(c)
    plt.grid(which="both")
    plt.plot(ch.example_record, ch.val_record)
plot_monitor(m)
plot_monitor(m,c="train_y_nll")
%run check_test_score.py -v run_settings/alexnet_extra_layer_dropouts2.json
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now check the model specified in alexnet_extra_layer_dropouts2.json model, which has 0.9 dropout on all but last convolutional layers, and 0.5 dropout on the last layer.
Step2: Finally, check the model specified in alexnet_extra_layer_dropouts3.json model, which has 0.5 dropout on all convolutional layers.
Step3: Looks like the alexnet_extra_layer_dropouts2.json model gave the best score. It also still continues to improve. Let's look at the evolution of nll
Step4: The graph looks sort of stable for valid_y_nll. Perhaps there's not that much room for improvement? It could be useful to let the model run for around 85 epochs, which is roughly the time the current best model ran for. The #3 model was stopped, and #1 and #2 are let to run until ~epoch 85.
|
7,693
|
<ASSISTANT_TASK:>
Python Code:
!pip install --user --upgrade --no-deps pixiedust
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from pixiedust.packageManager import PackageManager
pkg=PackageManager()
pkg.installPackage("graphframes:graphframes:0")
properties = {
'twitter': {
'restAPI': 'https://xxx:xxx@cdeservice.mybluemix.net/api/v1/messages/search',
'username': 'xxx',
'password': 'xxx'
},
'cloudant': {
'account':'https://xxx:xxx@xxx.cloudant.com',
'username':'xxx',
'password':'xxx',
'database':'election2016'
}
}
import requests
import json
from requests.auth import HTTPBasicAuth
import http.client
class TwitterToCloudant:
    """Page through the Twitter Insights search API and bulk-load the
    returned tweets into a Cloudant database.

    `count` is the total number of tweets to fetch; the REST endpoint
    returns 100 tweets per page, so the recursion stops after count/100
    pages have been requested.
    """

    count = 100

    def query_twitter(self, config, url, query, loop):
        """Fetch one page of tweets and recurse through `related.next` links.

        config : dict with 'twitter' and 'cloudant' credential sections
        url    : page URL to fetch, or None to start from the configured REST API
        query  : search query string (used only when url is None)
        loop   : page counter; pass 0 on the initial call
        """
        loop = loop + 1
        if loop > (int(self.count) / 100):
            return

        # QUERY TWITTER: first call builds the URL from the config, follow-up
        # calls receive the pagination URL directly.
        if url is None:
            url = config["twitter"]["restAPI"]
            print(url, query)
        else:
            print(url)
        tweets = self.get_tweets(config, url, query)

        # LOAD TO CLOUDANT
        self.load_cloudant(config, tweets)

        # CONTINUE TO PAGE THROUGH RESULTS ....
        if "related" in tweets:
            url = tweets["related"]["next"]["href"]
            #!! recursive call
            self.query_twitter(config, url, None, loop)

    def get_tweets(self, config, url, query):
        """GET one page of tweets from the Twitter Insights endpoint and
        return the decoded JSON payload."""
        user = config["twitter"]["username"]
        password = config["twitter"]["password"]
        print("GET: Tweets from {} ".format(url))
        # BUG FIX: country_code used to be ' us' (leading space), which does
        # not match the intended 'us' country filter.
        if query is None:
            payload = {'country_code': 'us', 'lang': 'en'}
        else:
            payload = {'q': query, 'country_code': 'us', 'lang': 'en'}
        response = requests.get(url, params=payload, auth=HTTPBasicAuth(user, password))
        print("Got {} response ".format(response.status_code))
        tweets = json.loads(response.text)
        return tweets

    def load_cloudant(self, config, tweets):
        """POST the fetched tweets to Cloudant via the _bulk_docs endpoint.

        Does nothing when the payload carries no 'tweets' key (e.g. an
        error response from the search API)."""
        url = config["cloudant"]["account"] + "/" + config["cloudant"]["database"] + "/_bulk_docs"
        user = config["cloudant"]["username"]
        password = config["cloudant"]["password"]
        headers = {"Content-Type": "application/json"}
        if "tweets" in tweets:
            docs = {"docs": tweets["tweets"]}
            print("POST: Docs to {}".format(url))
            response = requests.post(url, data=json.dumps(docs), headers=headers, auth=HTTPBasicAuth(user, password))
            print("Got {} response ".format(response.status_code))
query = "#election2016"
count = 300
TtC = TwitterToCloudant()
TtC.count = count
TtC.query_twitter(properties, None, query, 0)
sc.version
sc._conf.getAll()
sqlContext = SQLContext(sc)
tweetsDF = sqlContext.read.format("com.cloudant.spark").\
option("cloudant.host",properties['cloudant']['account'].replace('https://','')).\
option("cloudant.username", properties['cloudant']['username']).\
option("cloudant.password", properties['cloudant']['password']).\
option("schemaSampleSize", "-1").\
option("jsonstore.rdd.partitions", "5").\
load(properties['cloudant']['database'])
tweetsDF.show(5)
tweetsDF.cache()
tweetsDF.printSchema()
tweetsDF2 = tweetsDF.select(tweetsDF.cde.author.gender.alias("gender"),
tweetsDF.cde.author.location.state.alias("state"),
tweetsDF.cde.content.sentiment.polarity.alias("polarity"))
tweetsDF2.count()
tweetsDF2.printSchema()
# count tweets by state
tweets_state = tweetsDF2.groupBy(tweetsDF2.state).count()
tweets_state.show(100)
# count by gender & polarity
tweets_gp0 = tweetsDF2.groupBy(tweetsDF2.gender, tweetsDF2.polarity).count()
tweets_gp0.show(100)
tweets_gp= tweetsDF2.where(tweetsDF2.polarity.isNotNull()).groupBy("polarity").pivot("gender").count()
tweets_gp.show(100)
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
tweets_state_pd = tweets_state.toPandas()
values = tweets_state_pd['count']
labels = tweets_state_pd['state']
plt.gcf().set_size_inches(16, 12, forward=True)
plt.title('Number of tweets by state')
plt.barh(range(len(values)), values)
plt.yticks(range(len(values)), labels)
plt.show()
tweets_gp_pd = tweets_gp.toPandas()
labels = tweets_gp_pd['polarity']
N = len(labels)
male = tweets_gp_pd['male']
female = tweets_gp_pd['female']
unknown = tweets_gp_pd['unknown']
ind = np.arange(N) # the x locations for the groups
width = 0.2 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind-width, male, width, color='b', label='male')
rects2 = ax.bar(ind, female, width, color='r', label='female')
rects3 = ax.bar(ind + width, unknown, width, color='y', label='unknown')
ax.set_ylabel('Count')
ax.set_title('Tweets by polarity and gender')
ax.set_xticks(ind + width)
ax.set_xticklabels(labels)
ax.legend((rects1[0], rects2[0], rects3[0]), ('male', 'female', 'unknown'))
plt.show()
tweetsDF.registerTempTable("tweets_DF")
sqlContext.sql("SELECT count(*) AS cnt FROM tweets_DF").show()
sqlContext.sql("SELECT message.actor.displayName AS author, count(*) as cnt FROM tweets_DF GROUP BY message.actor.displayName ORDER BY cnt DESC").show(10)
hashtags = sqlContext.sql("SELECT message.object.twitter_entities.hashtags.text as tags \
FROM tweets_DF \
WHERE message.object.twitter_entities.hashtags.text IS NOT NULL")
l = hashtags.map(lambda x: x.tags).collect()
tagCloud = [item for sublist in l for item in sublist]
from pyspark.sql import Row
tagCloudDF = sc.parallelize(tagCloud)
row = Row("hashtag")
hashtagsDF = tagCloudDF.map(row).toDF()
hashtagsDF.registerTempTable("hashtags_DF")
trending = sqlContext.sql("SELECT count(hashtag) as CNT, hashtag as TAG FROM hashtags_DF GROUP BY hashtag ORDER BY CNT DESC")
trending.show(10)
import brunel
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
trending_pd = trending.toPandas()
trending_pd.to_csv('trending_pd.csv')
tweets_state_pd.to_csv('tweets_state_pd.csv')
tweets_gp_pd.to_csv('tweets_gp_pd.csv')
trending_pd.head(5)
%brunel data('trending_pd') cloud color(cnt) size(cnt) label(tag) :: width=900, height=600
tweets_state_pd.head(5)
%brunel data('tweets_state_pd') bubble label(state) x(state) color(count) size(count)
tweets_gp_pd.head(5)
%brunel data('tweets_gp_pd') bar x(polarity) y(male, female) color(male, female) tooltip(#all) legends(none) :: width=800, height=300
hashtagsDF.write.format("com.cloudant.spark").\
option("cloudant.host",properties['cloudant']['account'].replace('https://','')).\
option("cloudant.username", properties['cloudant']['username']).\
option("cloudant.password", properties['cloudant']['password']).\
option("bulkSize", "2000").\
save("hashtags")
from pixiedust.display import *
tweets_state_us = tweets_state.filter(tweets_state.state.isin("Alabama", "Alaska", "Arizona",
"Arkansas", "California", "Colorado", "Connecticut", "Delaware", "Florida",
"Georgia", "Hawaii", "Idaho", "Illinois Indiana", "Iowa", "Kansas", "Kentucky",
"Louisiana", "Maine", "Maryland", "Massachusetts", "Michigan", "Minnesota",
"Mississippi", "Missouri", "Montana Nebraska", "Nevada", "New Hampshire",
"New Jersey", "New Mexico", "New York", "North Carolina", "North Dakota",
"Ohio", "Oklahoma", "Oregon", "Pennsylvania Rhode Island", "South Carolina",
"South Dakota", "Tennessee", "Texas","Utah", "Vermont", "Virginia",
"Washington", "West Virginia", "Wisconsin", "Wyoming"))
tweets_state_us.show(5)
display(tweets_state_us)
# TRAINING by hashtag
from pyspark.mllib.feature import HashingTF
from pyspark.mllib.clustering import KMeans, KMeansModel
# dataframe of tweets' messages and hashtags
mhDF = sqlContext.sql("SELECT message.body as message, \
message.object.twitter_entities.hashtags.text as tags \
FROM tweets_DF \
WHERE message.object.twitter_entities.hashtags.text IS NOT NULL")
mhDF.show()
# create an RDD of hashtags
hashtagsRDD = mhDF.rdd.map(lambda h: h.tags)
# create a feature vector for every tweet's hashtags:
# each hashtag represents a feature, and a function
# calculates how many times a hashtag appears in a tweet
htf = HashingTF(100)
vectors = hashtagsRDD.map(lambda hs: htf.transform(hs)).cache()
print(vectors.take(2))
# Build the model (cluster the data)
numClusters = 10 # number of clusters
model = KMeans.train(vectors, numClusters, maxIterations=10, initializationMode="random")
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType, StringType
def predict(tags):
    """Assign a K-means cluster id to a tweet's list of hashtags.

    Transforms the hashtags into the hashed feature vector used at
    training time, then queries the fitted KMeans model.
    """
    return model.predict(htf.transform(tags))
# Creates a Column expression representing a user defined function
udfPredict = udf(predict, IntegerType())
def formatstr(message):
    """Collapse a possibly multi-line message onto a single line,
    replacing line breaks with single spaces."""
    return " ".join(message.splitlines())
udfFormatstr = udf(formatstr, StringType())
# transform mhDF into cmhDF, a dataframe containing formatted messages,
# hashtags and cluster
mhDF2 = mhDF.withColumn("message", udfFormatstr(mhDF.message))
cmhDF = mhDF2.withColumn("cluster", udfPredict(mhDF2.tags))
cmhDF.show()
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# visualizing clusters
import brunel
cmh_pd = cmhDF.toPandas()
cmh_pd.to_csv('cmh_pd.csv')
%brunel data('cmh_pd') bubble x(cluster) color(#all) size(#count) tooltip(message, tags) legends(none)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Pixiedust provides a nice visualization plugin for d3 style plots. Have a look at https
Step2: When the library has been loaded successfully you have access to the PackageManager. Use the PackageManager to install a package to supply GraphFrames. Those are needed later in the notebook to complete the instructions for Spark GraphX.
Step3: At this point you are being asked to Please restart Kernel to complete installation of the new package. Use the Restart Kernel dialog from the menu to do that. Once completed, you can start the analysis and resume with the next section.
Step4: Import all required Python libraries.
Step5: Define a class with helper functions to query the Twitter service API and load documents in the Cloudant database using the bulk load API. (Note
Step6: Finally we make the call the load our Cloudant database with tweets. To do that, we require two parameters
Step7: At this point you should see a number of debug messages with response codes 200 and 201. As a result your database is loaded with the number of tweets you provided in count variable above.
Step8: Now you want to create a Spark SQL context object off the given Spark context.
Step9: The Spark SQL context (sqlContext) is used to read data from the Cloudant database. We use a schema sample size and specified number of partitions to load the data with. For details on these parameters check https
Step10: For performance reasons we will cache the Data Frame to prevent re-loading.
Step11: The schema of a Data Frame reveals the structure of all JSON documents loaded from your Cloudant database. Depending on the setting for the parameter schemaSampleSize the created RDD contains attributes for the first document only, for the first N documents, or for all documents. Please have a look at https
Step12: With the use of the IBM Insights for Twitter API all tweets are enriched with metadata. For example, the gender of the Twitter user or the state of his account location are added in clear text. Sentiment analysis is also done at the time the tweets are loaded from the original Twitter API. This allows us to group tweets according to their positive, neutral, or negative sentiment.
Step13: The above statement executes extremely fast because no actual function or transformation was computed yet. Spark uses a lazy approach to compute functions only when they are actually needed. The following function is used to show the output of the Data Frame. At that point only do you see a longer runtime to compute tweetsDF2.
Step14: Work with other Spark SQL functions to do things like counting, grouping etc.
Step15: 2.1 Plot results using matplotlib
Step16: Plot the number of tweets per state. Notice again how Spark computes the result lazily. In no previous output did we require the full DataFrame and it did not have to get fully computed until now.
Step17: More plots to group data by gender and polarity.
Step18: 2.2 Create SQL temporary tables
Step19: Run SQL statements using the sqlContext.sql() function and render output with show(). The result of a SQL function could again be mapped to a data frame.
Step20: With multiple temporary tables (potentially from different databases) you can execute JOIN and UNION queries to analyze the database in combination.
Step21: The hashtags are in lists, one per tweet. We flat map this list to a large list and then store it back into a temporary table. The temporary table can be used to define a hashtag cloud to understand which hashtag has been used how many times.
Step22: Create a DataFrame from the Python dictionary we used to flatten our hashtags into. The DataFrame has a simple schema with just a single column called hastag.
Step23: Register a new temp table for hashtags. Group and count tags to get a sense of trending issues.
Step24: 2.3 Visualize tag cloud with Brunel
Step25: Brunel libraries are able to read data from CSV files only. We will export our Panda DataFrames to CSV first to be able to load them with the Brunel libraries below.
Step26: Top 5 records in every Panda DF.
Step27: The hashtag cloud is visualized using the Brunel cloud graph.
Step28: State and location data can be plotted on a map or a bubble graph representing the number of tweets per state. We will exercise maps later using the GraphX framework.
Step29: Brunel graphs are D3 based and interactive. Try using your mouse on the graph for Gender polarity to hover over details and zoom in on the Y axis.
Step30: 2.4 Write analysis results back to Cloudant
Step31: 3. Analysis with Spark GraphX
Step32: To render a chart you have options to select the columns to display or the aggregation function to apply.
Step33: Use a data set with at least two numeric columns to create scatter plots.
|
7,694
|
<ASSISTANT_TASK:>
Python Code:
import glob
import os
import time
import imageio
import matplotlib.pyplot as plt
import numpy as np
import PIL
import tensorflow as tf
from IPython import display
from tensorflow.keras import layers
np.random.seed(1)
tf.random.set_seed(1)
BATCH_SIZE = 128
BUFFER_SIZE = 60000
EPOCHS = 60
LR = 1e-2
EMBED_DIM = 64 # intermediate_dim
(train_images, _), (test_images, _) = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype(
"float32"
)
train_images = (train_images - 127.5) / 127.5 # Normalize the images to [-1, 1]
# Batch and shuffle the data
train_dataset = tf.data.Dataset.from_tensor_slices(train_images)
train_dataset = train_dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
train_dataset = train_dataset.prefetch(BATCH_SIZE * 4)
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1).astype(
"float32"
)
test_images = (test_images - 127.5) / 127.5 # Normalize the images to [-1, 1]
# TODO 1.
def make_encoder(embed_dim):
    """Build the convolutional encoder mapping a 28x28x1 image to a
    latent vector of length *embed_dim*.

    Two stride-2 5x5 Conv2D layers (64 then 128 filters) with LeakyReLU
    and Dropout(0.3), followed by Flatten and a Dense projection.

    Args:
        embed_dim (int): dimensionality of the latent embedding.

    Returns:
        tf.keras.Sequential: the encoder model.
    """
    model = tf.keras.Sequential(name="encoder")
    # 28x28x1 -> 14x14x64 (stride-2 downsampling, same padding).
    model.add(
        layers.Conv2D(
            64, (5, 5), strides=(2, 2), padding="same", input_shape=[28, 28, 1]
        )
    )
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    # 14x14x64 -> 7x7x128.
    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding="same"))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    # Flatten (7*7*128 = 6272 features) and project to the latent space.
    model.add(layers.Flatten())
    model.add(layers.Dense(embed_dim))
    assert model.output_shape == (None, embed_dim)
    return model
# TODO 1.
def make_decoder(embed_dim):
    """Build the transposed-convolution decoder mapping a latent vector of
    length *embed_dim* back to a 28x28x1 image in [-1, 1] (tanh output).

    Args:
        embed_dim (int): dimensionality of the latent embedding.

    Returns:
        tf.keras.Sequential: the decoder model.
    """
    model = tf.keras.Sequential(name="decoder")
    # NOTE(review): this embed_dim -> embed_dim Dense is a pure linear map
    # of the latent vector and looks redundant -- confirm it is intended.
    model.add(layers.Dense(embed_dim, use_bias=False, input_shape=(embed_dim,)))
    # Project to 6272 = 7*7*128 features and reshape to a 7x7x128 grid.
    model.add(layers.Dense(6272, use_bias=False, input_shape=(embed_dim,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((7, 7, 128)))
    # 7x7x128 -> 7x7x128 (stride 1, refinement only).
    model.add(
        layers.Conv2DTranspose(
            128, (5, 5), strides=(1, 1), padding="same", use_bias=False
        )
    )
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    # 7x7x128 -> 14x14x64.
    model.add(
        layers.Conv2DTranspose(
            64, (5, 5), strides=(2, 2), padding="same", use_bias=False
        )
    )
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    # 14x14x64 -> 28x28x1; tanh keeps pixels in [-1, 1] to match the
    # normalization applied to the training images.
    model.add(
        layers.Conv2DTranspose(
            1,
            (5, 5),
            strides=(2, 2),
            padding="same",
            use_bias=False,
            activation="tanh",
        )
    )
    assert model.output_shape == (None, 28, 28, 1)
    return model
ae_model = tf.keras.models.Sequential(
[make_encoder(EMBED_DIM), make_decoder(EMBED_DIM)]
)
ae_model.summary()
make_encoder(EMBED_DIM).summary()
make_decoder(EMBED_DIM).summary()
# TODO 2.
def loss(model, original):
    """Return the mean squared pixel-wise reconstruction error of
    *original* under *model* (the autoencoder)."""
    reconstructed = model(original)
    return tf.reduce_mean(tf.square(reconstructed - original))
optimizer = tf.keras.optimizers.SGD(lr=LR)
checkpoint_dir = "./ae_training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=ae_model)
# TODO 3.
@tf.function
def train_step(images):
    """Run one optimization step of the autoencoder on a batch of images.

    The forward pass (and hence the reconstruction loss) is recorded
    under the GradientTape; the gradient is taken *outside* the tape
    context, which is the recommended usage (the original called
    tape.gradient inside the context).
    """
    with tf.GradientTape() as tape:
        reconstruction_loss = loss(ae_model, images)
    ae_gradients = tape.gradient(reconstruction_loss, ae_model.trainable_variables)
    optimizer.apply_gradients(zip(ae_gradients, ae_model.trainable_variables))
def train(dataset, epochs):
    """Train the global `ae_model` on *dataset* for *epochs* epochs.

    After each epoch a 4x4 grid of reconstructions of the first 16 test
    images is rendered and saved for visual progress tracking; a
    checkpoint is written every 5 epochs.

    Args:
        dataset: tf.data.Dataset yielding batches of training images.
        epochs (int): number of passes over the dataset.
    """
    for epoch in range(epochs):
        start = time.time()
        for image_batch in dataset:
            train_step(image_batch)
        # Produce images for the GIF as we go.
        display.clear_output(wait=True)
        generate_and_save_images(ae_model, epoch + 1, test_images[:16, :, :, :])
        # Save the model every 5 epochs.
        if (epoch + 1) % 5 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)
        print(f"Time for epoch {epoch + 1} is {time.time() - start} sec")
    # Generate once more after the final epoch.
    display.clear_output(wait=True)
    generate_and_save_images(ae_model, epochs, test_images[:16, :, :, :])
def generate_and_save_images(model, epoch, test_input):
    """Render a 4x4 grid of *model*'s reconstructions of *test_input*,
    save it as image_at_epoch_XXXX.png in the working directory, and
    display it.

    Pixel values are mapped from the model's [-1, 1] range back to
    [0, 255] for display.
    """
    # Notice `training` is set to False.
    # This is so all layers run in inference mode (batchnorm).
    predictions = model(test_input, training=False)
    fig = plt.figure(figsize=(4, 4))
    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i + 1)
        # Undo the (x - 127.5) / 127.5 normalization applied when loading MNIST.
        pixels = predictions[i, :, :] * 127.5 + 127.5
        pixels = np.array(pixels, dtype="float")
        pixels = pixels.reshape((28, 28))
        plt.imshow(pixels, cmap="gray")
        plt.axis("off")
    plt.savefig(f"image_at_epoch_{epoch:04d}.png")
    plt.show()
generate_and_save_images(ae_model, 4, test_images[:16, :, :, :])
# TODO 4.
train(train_dataset, EPOCHS)
# Display a single image using the epoch number
def display_image(epoch_no):
    """Load and return the saved 4x4 sample-grid image for *epoch_no*.

    generate_and_save_images() writes image_at_epoch_XXXX.png to the
    current working directory, so open it from there (the original
    looked in ./ae_images/, which nothing in this notebook writes to).
    """
    return PIL.Image.open(f"image_at_epoch_{epoch_no:04d}.png")
display_image(EPOCHS)
anim_file = "autoencoder.gif"
with imageio.get_writer(anim_file, mode="I") as writer:
filenames = glob.glob("./ae_images/image*.png")
filenames = sorted(filenames)
last = -1
for i, filename in enumerate(filenames):
frame = 2 * (i**0.5)
if round(frame) > round(last):
last = frame
else:
continue
image = imageio.imread(filename)
writer.append_data(image)
image = imageio.imread(filename)
writer.append_data(image)
import IPython
if IPython.version_info > (6, 2, 0, ""):
display.Image(filename=anim_file)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we'll define some of the environment variables we'll use in this notebook. Note that we are setting the EMBED_DIM to be 64. This is the dimension of the latent space for our autoencoder.
Step2: Load and prepare the dataset
Step3: Next, we define our input pipeline using tf.data. The pipeline below reads in train_images as tensor slices and then shuffles and batches the examples for training.
Step4: Create the encoder and decoder models
Step5: The Decoder
Step6: Finally, we stitch the encoder and decoder models together to create our autoencoder.
Step7: Using .summary() we can have a high-level summary of the full autoencoder model as well as the individual encoder and decoder. Note how the shapes of the tensors mirror each other as data is passed through the encoder and then the decoder.
Step8: Next, we define the loss for our autoencoder model. The loss we will use is the reconstruction error. This loss is similar to the MSE loss we've commonly use for regression. Here we are applying this error pixel-wise to compare the original MNIST image and the image reconstructed from the decoder.
Step9: Optimizer for the autoencoder
Step10: Save checkpoints
Step11: Define the training loop
Step12: We use the train_step function above to define training of our autoencoder. Note here, the train function takes as argument the tf.data dataset and the number of epochs for training.
Step13: Generate and save images.
Step14: Let's see how our model performs before any training. We'll take as input the first 16 digits of the MNIST test set. Right now they just look like random noise.
Step15: Train the model
Step16: Create a GIF
|
7,695
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.read_csv("provstore/data.csv")
df.head()
df.describe()
# The number of each label in the dataset
df.label.value_counts()
from analytics import test_classification
results, importances = test_classification(df)
from analytics import balance_smote
df = balance_smote(df)
results_bal, importances_bal = test_classification(df)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Classification on unbalanced (original) data
Step2: Cross Validation tests
Step3: ## Classification on balanced data
Step4: Balancing the data
|
7,696
|
<ASSISTANT_TASK:>
Python Code:
import os, sys
import shutil, time, warnings
from contextlib import redirect_stdout
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table, vstack
from PIL import Image, ImageDraw, ImageFont
import multiprocessing
nproc = multiprocessing.cpu_count() // 2
%matplotlib inline
PIXSCALE = 0.262
figdir = '/global/project/projectdirs/desi/users/ioannis/legacysurveys/overview-paper'
figfile = os.path.join(figdir, 'gallery.fits')
jpgdir = os.path.join(figdir, 'jpg')
if not os.path.isdir(jpgdir):
os.mkdir(jpgdir)
pngdir = os.path.join(figdir, 'png')
if not os.path.isdir(pngdir):
os.mkdir(pngdir)
cat = Table()
cat['name'] = (
'NGC6742',
'M92',
'Bow-Shock',
'NGC2782',
'UGC10321',
'C4-2010'
)
cat['nicename'] = (
'NGC 6742 Planetary Nebula',
'Messier 92 Globular Cluster',
'Interstellar Bow Shock',
'NGC 2782',
'UGC 10321 Galaxy Group',
'SDSS/C4 Galaxy Cluster 2010'
)
cat['viewer'] = (
'http://legacysurvey.org/viewer/?layer=decals-dr6&ra=284.83291667&dec=48.46527778',
'http://legacysurvey.org/viewer/?layer=decals-dr6&ra=259.28029167&dec=43.13652778&zoom=12',
'http://legacysurvey.org/viewer?ra=325.6872&dec=1.0032&zoom=14&layer=decals-dr5',
'http://legacysurvey.org/viewer/?layer=decals-dr6&ra=138.52129167&dec=40.11369444&zoom=12',
'http://legacysurvey.org/viewer?ra=244.5280&dec=21.5591&zoom=14&layer=decals-dr5',
'http://legacysurvey.org/viewer?ra=29.0707&dec=1.0510&zoom=13&layer=decals-dr5'
)
cat['dr'] = (
'dr6',
'dr6',
'dr7',
'dr6',
'dr7',
'dr7'
)
cat['ra'] = (
284.83291667,
259.28029167,
325.6872,
138.52129167,
244.5280,
29.070641492
)
cat['dec'] = (
48.46527778,
43.13652778,
1.0032,
40.11369444,
21.5591,
1.050816667
)
cat['diam'] = np.array([
1.5,
20,
4,
7,
3,
5
]).astype('f4') # [arcmin]
cat
toss = Table()
toss['name'] = (
'Abell383',
'NGC2874'
)
toss['nicename'] = (
'Abell 383',
'NGC2874 Galaxy Group'
)
toss['viewer'] = (
'http://legacysurvey.org/viewer?ra=42.0141&dec=-3.5291&zoom=15&layer=decals-dr5',
'http://legacysurvey.org/viewer?ra=141.4373&dec=11.4284&zoom=13&layer=decals-dr5'
)
toss['dr'] = (
'dr5', # Abell 383
'dr5' # C4 cluster
)
toss['ra'] = (
42.0141,
141.44215000
)
toss['dec'] = (
-3.5291,
11.43696000
)
toss['diam'] = np.array([
6,
6
]).astype('f4') # [arcmin]
toss
def init_survey(dr='dr7'):
    """Return a LegacySurveyData handle for the requested data release.

    Points at the DR7 (default) or DR6 survey directory on the NERSC
    project filesystem; pipeline output goes to the global `figdir`.

    Args:
        dr (str): 'dr7' for DR7; any other value selects DR6.

    Returns:
        legacypipe.survey.LegacySurveyData: the configured survey object.
    """
    from legacypipe.survey import LegacySurveyData
    if dr == 'dr7':
        survey = LegacySurveyData(
            survey_dir='/global/project/projectdirs/cosmo/work/legacysurvey/dr7',
            output_dir=figdir)
    else:
        survey = LegacySurveyData(
            survey_dir='/global/project/projectdirs/cosmo/work/legacysurvey/dr6',
            output_dir=figdir)
    return survey
def simple_wcs(obj):
    """Build a simple tangent-plane (TAN) WCS centered on a single object.

    Args:
        obj: catalog row/mapping with 'ra', 'dec' (degrees) and
            'diam' (arcminutes).

    Returns:
        astrometry.util.util.Tan: square WCS covering obj['diam'] at the
        global PIXSCALE (arcsec/pixel).
    """
    from astrometry.util.util import Tan
    # Mosaic width in pixels from the diameter in arcmin.
    size = np.rint(obj['diam'] * 60 / PIXSCALE).astype('int')  # [pixels]
    wcs = Tan(obj['ra'], obj['dec'], size/2+0.5, size/2+0.5,
              -PIXSCALE/3600.0, 0.0, 0.0, PIXSCALE/3600.0,
              float(size), float(size))
    return wcs
def _build_sample_one(args):
    """Wrapper for multiprocessing: unpack the argument tuple and call
    build_sample_one()."""
    return build_sample_one(*args)
def build_sample_one(obj, verbose=False):
    """Find the CCDs overlapping one object and keep it only if there is
    three-band (g, r, z) coverage.

    Args:
        obj: catalog row with 'name', 'ra', 'dec', 'diam', 'dr'.
        verbose (bool): print a per-object summary when a match is found.

    Returns:
        The input row if grz coverage exists, otherwise None (also None
        when the CCD lookup fails).
    """
    survey = init_survey(dr=obj['dr'])
    print('Working on {}...'.format(obj['name']))
    wcs = simple_wcs(obj)
    try:
        ccds = survey.ccds_touching_wcs(wcs)  # , ccdrad=2*diam/3600)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
        # propagate; a failed lookup simply drops the object.
        return None
    if ccds:
        # Is there 3-band coverage?
        if 'g' in ccds.filter and 'r' in ccds.filter and 'z' in ccds.filter:
            if verbose:
                print('For {} found {} CCDs, RA = {:.5f}, Dec = {:.5f}, Diameter={:.4f} arcmin'.format(
                    obj['name'], len(ccds), obj['ra'], obj['dec'], obj['diam']))
            return obj
    return None
def build_sample(cat, use_nproc=nproc):
    """Build the full sample of objects with grz coverage in DR6+DR7.

    Args:
        cat: input catalog (iterable of rows accepted by build_sample_one).
        use_nproc (int): number of worker processes; 1 runs serially.

    Returns:
        astropy.table.Table: the rows of *cat* that have grz coverage.
    """
    sampleargs = list()
    for cc in cat:
        sampleargs.append((cc, True))  # True -> verbose per-object output
    if use_nproc > 1:
        # Respect the caller's requested parallelism (the original always
        # used the global `nproc` here, ignoring use_nproc).
        p = multiprocessing.Pool(use_nproc)
        result = p.map(_build_sample_one, sampleargs)
        p.close()
    else:
        result = list()
        for args in sampleargs:
            result.append(_build_sample_one(args))
    # Remove non-matching objects (None results) and stack the rest.
    outcat = vstack(list(filter(None, result)))
    print('Found {}/{} objects in the DR6+DR7 footprint.'.format(len(outcat), len(cat)))
    return outcat
sample = build_sample(cat, use_nproc=1)
print('Writing {}'.format(figfile))
sample.write(figfile, overwrite=True)
sample
def custom_brickname(obj, prefix='custom-'):
    """Build a brick name like 'custom-RRRRRRpDDDDD' from an object's
    coordinates, encoding RA/Dec in units of 0.001 degree ('p'/'m' marks
    the sign of the declination).

    Args:
        obj: mapping/row with 'ra' and 'dec' in degrees.
        prefix (str): string prepended to the coordinate code. The
            original hard-coded 'custom-' and ignored this parameter;
            the default preserves the old output.

    Returns:
        str: the brick name.
    """
    brickname = '{}{:06d}{}{:05d}'.format(
        prefix, int(1000*obj['ra']), 'm' if obj['dec'] < 0 else 'p',
        int(1000*np.abs(obj['dec'])))
    return brickname
def custom_coadds_one(obj, scale=PIXSCALE, clobber=False):
    """Generate the grz color mosaic (JPG) for a single object.

    Runs the legacypipe `run_brick` pipeline through the early-coadd
    stage only, copies the resulting JPG into `jpgdir`, and removes the
    intermediate coadd directory.

    Args:
        obj: catalog row with 'name', 'ra', 'dec', 'diam' (arcmin), 'dr'.
        scale (float): pixel scale in arcsec/pixel (default PIXSCALE).
        clobber (bool): regenerate the JPG even if it already exists.
    """
    from legacypipe.runbrick import run_brick
    #from astrometry.util.multiproc import multiproc
    #from legacypipe.runbrick import stage_tims, run_brick
    #from legacypipe.coadds import make_coadds
    name = obj['name']
    jpgfile = os.path.join(jpgdir, '{}.jpg'.format(name))
    if os.path.isfile(jpgfile) and not clobber:
        print('File {} exists...skipping.'.format(jpgfile))
    else:
        # Mosaic width in pixels from the object diameter (arcmin).
        size = np.rint(obj['diam'] * 60 / scale).astype('int')  # [pixels]
        print('Generating mosaic for {} with width={} pixels.'.format(name, size))
        bands = ('g', 'r', 'z')  # NOTE(review): unused -- run_brick band defaults apply; confirm
        # Custom RGB stretch for the (bright) Bow-Shock field.
        if 'Bow' in name:
            rgb_kwargs = dict({'Q': 200, 'm': 0.01})
        else:
            rgb_kwargs = dict({'Q': 20, 'm': 0.03})
        survey = init_survey(dr=obj['dr'])
        brickname = custom_brickname(obj, prefix='custom-')
        # Suppress the noisy astropy/legacypipe warnings during the run.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            run_brick(None, survey, radec=(obj['ra'], obj['dec']), pixscale=scale,
                      width=size, height=size, rgb_kwargs=rgb_kwargs, threads=nproc,
                      stages=['image_coadds'], splinesky=True, early_coadds=True, pixPsf=True,
                      hybridPsf=True, normalizePsf=True, write_pickles=False, depth_cut=False,
                      apodize=True, do_calibs=False, ceres=False)
        sys.stdout.flush()
        # Copy the pipeline's JPG into jpgdir, then clean up the scratch output.
        _jpgfile = os.path.join(survey.output_dir, 'coadd', 'cus', brickname,
                                'legacysurvey-{}-image.jpg'.format(brickname))
        shutil.copy(_jpgfile, jpgfile)
        shutil.rmtree(os.path.join(survey.output_dir, 'coadd'))
#custom_coadds_one(sample[2], clobber=True)
def custom_coadds(sample, clobber=False):
    """Generate a custom coadd mosaic for every object in *sample*,
    forwarding the clobber flag to custom_coadds_one()."""
    for one in sample:
        custom_coadds_one(one, clobber=clobber)
coaddslogfile = os.path.join(figdir, 'make-coadds.log')
print('Generating the coadds.')
print('Logging to {}'.format(coaddslogfile))
t0 = time.time()
with open(coaddslogfile, 'w') as log:
with redirect_stdout(log):
custom_coadds(sample, clobber=True)
print('Total time = {:.3f} minutes.'.format((time.time() - t0) / 60))
barlen = np.round(60.0 / PIXSCALE).astype('int')
fonttype = os.path.join(figdir, 'Georgia.ttf')
def _add_labels_one(args):
    """Wrapper for multiprocessing: unpack the argument tuple and call
    add_labels_one()."""
    return add_labels_one(*args)
def add_labels_one(obj, verbose=False):
    """Annotate one object's JPG mosaic with its nice name (top left) and
    a 1-arcmin scale bar (bottom right), writing the result as a PNG.

    Font and line sizes are scaled from the image width so annotations
    look consistent across mosaics of different sizes.

    Args:
        obj: catalog row with 'name' and 'nicename'.
        verbose (bool): unused; kept for the multiprocessing wrapper.
    """
    name = obj['name']
    nicename = obj['nicename']
    jpgfile = os.path.join(jpgdir, '{}.jpg'.format(name))
    pngfile = os.path.join(pngdir, '{}.png'.format(name))
    thumbfile = os.path.join(pngdir, 'thumb-{}.png'.format(name))
    im = Image.open(jpgfile)
    sz = im.size
    # Annotation sizes proportional to the image width.
    fntsize = np.round(sz[0]/28).astype('int')
    width = np.round(sz[0]/175).astype('int')
    font = ImageFont.truetype(fonttype, size=fntsize)
    draw = ImageDraw.Draw(im)
    # Label the object name--
    draw.text((0+fntsize*2, 0+fntsize*2), nicename, font=font)
    # Add a scale bar--
    # NOTE(review): PIL's im.size is (width, height); x-coordinates here
    # use sz[1] and y uses sz[0], which only agrees for square mosaics --
    # confirm this is intentional.
    x0, x1, yy = sz[1]-fntsize*2-barlen, sz[1]-fntsize*2, sz[0]-fntsize*2
    draw.line((x0, yy, x1, yy), fill='white', width=width)
    im.save(pngfile)
    # Generate a thumbnail (disabled).
    if False:
        cmd = '/usr/bin/convert -thumbnail 300x300 {} {}'.format(pngfile, thumbfile)
        os.system(cmd)
def add_labels(sample, use_nproc=nproc):
    """Annotate every object's JPG with its name and a scale bar, in
    parallel when use_nproc > 1.

    Args:
        sample: iterable of catalog rows accepted by add_labels_one().
        use_nproc (int): number of worker processes (defaults to the
            global `nproc`, preserving the original behavior); 1 runs
            serially. Added for consistency with build_sample().
    """
    labelargs = [(obj, False) for obj in sample]
    if use_nproc > 1:
        p = multiprocessing.Pool(use_nproc)
        p.map(_add_labels_one, labelargs)
        p.close()
    else:
        for args in labelargs:
            _add_labels_one(args)
def make_montage(cat, clobber=False):
    """Assemble the per-object PNGs into a single montage figure using
    ImageMagick's `montage` tool.

    Args:
        cat: table with a 'name' column; one PNG per name is expected
            in the global `pngdir`.
        clobber (bool): overwrite an existing montage file if True.
    """
    montagefile = os.path.join(figdir, 'overview-gallery.png')
    ncol = 3
    # Size the grid from the catalog passed in; the original used the
    # global `sample` here, silently ignoring the `cat` argument.
    nrow = np.ceil(len(cat) / ncol).astype('int')
    if not os.path.isfile(montagefile) or clobber:
        cmd = 'montage -bordercolor white -borderwidth 1 -tile {}x{} -geometry 512x512 '.format(ncol, nrow)
        cmd = cmd+' '.join([os.path.join(pngdir, '{}.png'.format(name)) for name in cat['name']])
        cmd = cmd+' {}'.format(montagefile)
        print(cmd)
        os.system(cmd)
        print('Writing {}'.format(montagefile))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preliminaries
Step2: Build a sample with the objects of interest.
Step3: Some rejected objects.
Step8: Ensure all objects are in the DR6+DR7 footprint before building coadds.
Step9: Generate the color mosaics for each object.
Step11: Add labels and a scale bar.
Step12: Finally make a nice montage figure for the paper.
|
7,697
|
<ASSISTANT_TASK:>
Python Code:
from scipy import sparse
import numpy as np
sa = sparse.csr_matrix(np.array([[1,2,3],[4,5,6],[7,8,9]]))
sb = sparse.csr_matrix(np.array([0,1,2]))
result = sa.multiply(sb)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,698
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import sys
sys.path.append("../../../bayesianpy")
import bayesianpy
from bayesianpy.network import Builder as builder
import logging
import os
import math
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
import seaborn as sns
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
bayesianpy.jni.attach(logger)
db_folder = bayesianpy.utils.get_path_to_parent_dir("")
iris = pd.read_csv(os.path.join(db_folder, "data/iris.csv"), index_col=False)
network = bayesianpy.network.create_network()
cluster = builder.create_cluster_variable(network, 4)
node = builder.create_multivariate_continuous_node(network, iris.drop('iris_class',axis=1).columns.tolist(), "joint")
builder.create_link(network, cluster, node)
class_variable = builder.create_discrete_variable(network, iris, 'iris_class', iris['iris_class'].unique())
builder.create_link(network, cluster, class_variable)
head_variables = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
with bayesianpy.data.DataSet(iris, db_folder, logger) as dataset:
model = bayesianpy.model.NetworkModel(network, logger)
model.train(dataset)
queries = [bayesianpy.model.QueryConditionalJointProbability(
head_variables=[v],
tail_variables=['iris_class']) for v in head_variables]
(engine, _, _) = bayesianpy.model.InferenceEngine(network).create()
query = bayesianpy.model.SingleQuery(network, engine, logger)
results = query.query(queries)
jd = bayesianpy.visual.JointDistribution()
fig = plt.figure(figsize=(10,10))
for i, r in enumerate(list(results)):
ax = fig.add_subplot(2, 2, i+1)
jd.plot_distribution_with_variance(ax, iris, queries[i].get_head_variables(), r)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create the network, specifying a latent variable.
Step2: And finally, query the model, specifying each variable in a separate query (otherwise the query will return a covariance matrix)
|
7,699
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
from topoflow.components.met_base import met_component
m = met_component()
cfg_file = './input/meteorology.cfg'
m.initialize(cfg_file)
precip = m.get_value('atmosphere_water__precipitation_leq-volume_flux') # `P` internally
print type(precip)
print precip.size
precip
print m.get_start_time(), m.get_current_time(), m.get_end_time()
m.update()
print '\nCurrent time: {} s'.format(m.get_current_time())
precip = m.get_value('atmosphere_water__precipitation_leq-volume_flux')
print precip
new_value = np.array(15, dtype=np.float64) # set_value doesn't convert to the TF type...
m.set_value('atmosphere_water__precipitation_leq-volume_flux', new_value)
precip = m.get_value('atmosphere_water__precipitation_leq-volume_flux')
print precip
m.update()
print '\nCurrent time: {} s'.format(m.get_current_time())
precip = m.get_value('atmosphere_water__precipitation_leq-volume_flux')
print precip
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import the Meteorology component and create an instance
Step2: Locate the cfg file and initialize the component
Step3: Despite setting a value of 20 mm/hr for P in the cfg file, if I call get_value at this point, the model precip values are zeroed out
Step4: Maybe this will change after the first update?
Step5: Advance the model by one time step
Step6: Note that it hasn't precipitated
Step7: But this might be expected, since the initial precip value retrieved by get_value was also zero.
Step8: Note that I can't simply give set_value a new value of 15; I need to create a one-element Numpy array with a value of 15 to match what TopoFlow needs internally.
Step9: Is precipitation being produced?
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.