Unnamed: 0
int64 0
15.9k
| cleaned_code
stringlengths 67
124k
⌀ | cleaned_prompt
stringlengths 168
30.3k
⌀ |
|---|---|---|
2,500
|
<ASSISTANT_TASK:>
Python Code:
from collections import deque

# A deque used as a FIFO queue: append() enqueues at the right end,
# popleft() dequeues from the left end — both are O(1).
queue = deque(["Eric", "John", "Michael"])
queue.append("Terry")   # Terry joins the queue
queue.append("Graham")  # Graham joins the queue
queue.pop()             # remove the element at the tail
queue.popleft()         # remove the element at the head
queue                   # the elements remaining in the queue

# Set literal: duplicate entries are discarded automatically.
basket = {'apple', 'orange', 'apple', 'pear', 'orange', 'banana'}
print(basket)  # demonstrates the de-duplication
print('orange in basket? ', 'orange' in basket)  # O(1) membership test
print('crabgrass in basket? ', 'crabgrass' in basket)
# Binary operations between two sets.
a = set('abracadabra')
b = set('alacazam')
print(a)
print(b)
print(a & b)  # intersection
print(a | b)  # union
print(a - b)  # difference
print(a ^ b)  # symmetric difference
import re
# 将正则表达式编译成Pattern对象
pattern = re.compile(r'hello')
# 使用Pattern匹配文本,获得匹配结果,无法匹配时将返回None
match = pattern.match('hello world!')
if match:
# 使用Match获得分组信息
print(match.group())
m = re.match(r'hello', 'hello world!')
print(m.group())
# Verbose (re.X) pattern: whitespace and inline comments inside the pattern
# are ignored, so it can be laid out for readability.  The notebook export
# stripped the triple quotes here, leaving a syntax error; restored below.
# Both patterns match the same thing: digits, a dot, optional fraction digits.
a = re.compile(r"""\d +  # the integral part
                   \.    # the decimal point
                   \d *  # some fractional digits""", re.X)
b = re.compile(r"\d+\.\d*")
# Inspect the attributes of a Match object.
m = re.match(r'(\w+) (\w+)(?P<sign>.*)', 'hello world!')
print("m.string:", m.string)        # the subject text
print("m.re:", m.re)                # the Pattern that produced this match
print("m.pos:", m.pos)              # search start offset
print("m.endpos:", m.endpos)        # search end offset
print("m.lastindex:", m.lastindex)  # index of the last matched capturing group
print("m.lastgroup:", m.lastgroup)  # name of the last matched named group
print("m.group(1,2):", m.group(1, 2))
print("m.groups():", m.groups())
print("m.groupdict():", m.groupdict())  # named groups only
print("m.start(2):", m.start(2))
print("m.end(2):", m.end(2))
print("m.span(2):", m.span(2))
print(r"m.expand(r'\2 \1\3'):", m.expand(r'\2 \1\3'))

# Inspect the attributes of a compiled Pattern object.
p = re.compile(r'(\w+) (\w+)(?P<sign>.*)', re.DOTALL)
print("p.pattern:", p.pattern)
print("p.flags:", p.flags)
print("p.groups:", p.groups)          # number of capturing groups
print("p.groupindex:", p.groupindex)  # mapping from group names to numbers

pattern = re.compile(r'hello')
# Match the text against the compiled Pattern; returns None when it cannot match.
match = pattern.match('hello world!')
if match:
    print(match.group())

p = re.compile(r'world')
# search() scans the whole string for a match; returns None when none exists.
# In this example match() cannot succeed, because match() anchors at the start.
match1 = p.search('hello world!')
match2 = p.match('hello world!')
if match1:
    print('pattern.search result: ', match1.group())
if match2:
    print('pattern.match result: ', match2.group())

# split(): cut the string wherever the pattern matches.
p = re.compile(r'\d+')
print(p.split('one1two2three3four4'))
# findall(): list every non-overlapping match as a string.
p = re.compile(r'\d+')
print(p.findall('one1two2three3four4'))
# finditer(): iterate over Match objects instead of strings.
p = re.compile(r'\d+')
for m in p.finditer('one1two2three3four4'):
    print(m.group())
# sub(): replace each match, here swapping the two captured words.
p = re.compile(r'(\w+) (\w+)')
s = 'i say, hello world!'
print(p.sub(r'\2 \1', s))
def func(m):
    """Replacement callback for Pattern.sub: title-case both captured words."""
    parts = (m.group(1).title(), m.group(2).title())
    return ' '.join(parts)
print(p.sub(func, s))
p = re.compile(r'(\w+) (\w+)')
s = 'i say, hello world!'
print(p.subn(r'\2 \1', s))
def func(m):
    """Substitution callback for Pattern.subn: capitalize the two matched words."""
    premier = m.group(1).title()
    second = m.group(2).title()
    return premier + ' ' + second
print(p.subn(func, s))
import re
import urllib.request
import urllib
from collections import deque

# Minimal breadth-first web crawler: fetch a page, extract its links, and
# keep crawling any link that has not been visited yet.
queue = deque()   # URLs waiting to be fetched (FIFO -> breadth-first order)
visited = set()   # URLs already dequeued, for O(1) membership tests
url = 'http://news.dbanotes.net'  # 入口页面, 可以换成别的
queue.append(url)
cnt = 0
# Hoisted out of the loop: compile the link-extracting regex once.
linkre = re.compile('href="(.+?)"')
while queue:
    url = queue.popleft()  # 队首元素出队
    visited |= {url}       # 标记为已访问
    print('已经抓取: ' + str(cnt) + ' 正在抓取 <--- ' + url)
    cnt += 1
    urlop = urllib.request.urlopen(url)
    # Skip non-HTML responses (images, PDFs, ...).
    if 'html' not in urlop.getheader('Content-Type'):
        continue
    # Don't let one bad page abort the whole crawl, but catch only the
    # decode/IO failures we expect instead of a bare except that would also
    # swallow KeyboardInterrupt and programming errors.
    try:
        data = urlop.read().decode('utf-8')
    except (UnicodeDecodeError, OSError):
        continue
    # Extract every link on the page; enqueue unseen absolute URLs.
    for x in linkre.findall(data):
        if 'http' in x and x not in visited:
            queue.append(x)
            print('加入队列 ---> ' + x)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Python的集合
Step2: 在我们的爬虫中, 只是用到其中的快速判断元素是否在集合内的功能, 以及集合的并运算.
Step3: re提供了众多模块方法用于完成正则表达式的功能。这些方法可以使用Pattern实例的相应方法替代,唯一的好处是少写一行re.compile()代码,但同时也无法复用编译后的Pattern对象。这些方法将在Pattern类的实例方法部分一起介绍。如上面这个例子可以简写为:
Step5: Compile
Step6: re模块还提供了一个方法escape(string),用于将string中的正则表达式元字符如*/+/?等之前加上转义符再返回,在需要大量匹配元字符时有那么一点用。
Step7: Pattern
Step8: re模块方法
Step9: search(string[, pos[, endpos]]) | re.search(pattern, string[, flags])
Step10: split(string[, maxsplit]) | re.split(pattern, string[, maxsplit])
Step11: findall(string[, pos[, endpos]]) | re.findall(pattern, string[, flags])
Step12: finditer(string[, pos[, endpos]]) | re.finditer(pattern, string[, flags])
Step13: sub(repl, string[, count]) | re.sub(pattern, repl, string[, count])
Step14: subn(repl, string[, count]) |re.sub(pattern, repl, string[, count])
Step15: 4. Python网络爬虫Ver 1.0 alpha
|
2,501
|
<ASSISTANT_TASK:>
Python Code:
from varcode import Variant
from pyensembl import ensembl_grch37

# Build a Variant for chr7:140453136 A>T on the GRCh37 reference
# (per the notebook text, this falls in the BRAF gene).
myVariant = Variant(contig=7, start=140453136, ref="A", alt="T", ensembl=ensembl_grch37)
myVariant.short_description  # the variant in official variation nomenclature
myVariant.coding_genes       # genes whose coding region overlaps the variant
# Predicted effects of this variant on each overlapping transcript.
myEffects = myVariant.effects()
myEffects
# The single highest-priority (most severe) predicted effect.
topPriorityEffect = myEffects.top_priority_effect()
topPriorityEffect
print ('The mutation %s leads to a %s in gene %s (%s)' % (myVariant.short_description, type(topPriorityEffect).__name__, topPriorityEffect.gene_name, topPriorityEffect.short_description))
# Protein sequence around the mutated residue, before and after mutation.
variantLocation = topPriorityEffect.aa_mutation_start_offset
topPriorityEffect.original_protein_sequence[variantLocation-3:variantLocation+4]
topPriorityEffect.mutant_protein_sequence[variantLocation-3:variantLocation+4]

# Load whole variant collections from MAF and VCF files.
from varcode import load_maf, load_vcf
mafVariants = load_maf("../test/data/tcga_ov.head.maf")
mafVariants # should load 4 variants
vcfVariants = load_vcf("../test/data/somatic_hg19_14muts.vcf")
vcfVariants # should load 14 variants
# Per-gene variant counts for each collection.
vcfVariants.gene_counts()
mafVariants.gene_counts()
# Effects restricted to the TP53 variants of the VCF collection.
tp53Mutations = vcfVariants.groupby_gene_name()["TP53"].effects()
tp53Mutations
vcfEffects = vcfVariants.effects()
# Keep only effects that actually change a protein sequence.
nonSilentMutations = vcfEffects.drop_silent_and_noncoding()
nonSilentMutations.top_priority_effect_per_gene_id()
from varcode import Insertion
# Filter by effect priority relative to Insertion (per the notebook text,
# effects whose priority falls below an Insertion — TODO confirm semantics).
vcfEffects.filter_by_effect_priority(Insertion).top_priority_effect_per_gene_id()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's create a new Variant that will represent our variant of interest
Step2: Now that we defined this variant,
Step3: this is our variation, but expressed using the offical variation nomenclature.
Step4: Looks like this variant lies within the BRAF gene;
Step5: What the above list tells us is that this variation can potentially affect four different BRAF transcripts
Step6: So overall, this what we have learned about the variant using varcode
Step7: Moreover, varcode can provide you with the altered protein sequence,
Step8: See that valine (V) changing into a glutamic acid (E)?
Step9: The following loads mutations from the MAF file distributed within our code repository
Step10: and this should load variants from one of the VCF files
Step11: You can even extract summary statistics from these variant collections
Step12: Filtering Variants
Step13: or, for example, you might want to drop all mutations that do not affect a protein sequence or variants that fall in a non-coding genomic region
Step14: or, you might want to get all effects whose priority falls below an Insertion
|
2,502
|
<ASSISTANT_TASK:>
Python Code:
x = Variable(T.ones(2,2), requires_grad=True)
print x
y = T.exp(x + 2)
yy = T.exp(-x-2)
print y
z = (y + yy)/2
out = z.mean()
print z, out
make_dot(out)
out.backward(T.FloatTensor(1), retain_graph=True)
x.grad
T.randn(1,1)
from __future__ import print_function
xx = Variable(torch.randn(1,1), requires_grad = True)
print(xx)
yy = 3*xx
zz = yy**2
#yy.register_hook(print)
zz.backward(T.FloatTensor([0.1]))
print(xx.grad)
# Hand-written forward and backward passes for a one-hidden-layer ReLU net:
# y_pred = w2*(relu(w1*x))
# loss = 0.5*sum (y_pred - y)^2
import numpy as np
N, D_in, D_hidden, D_out = 50, 40, 100, 10
x = np.random.randn(N, D_in)
y = np.random.randn(N, D_out)
w1 = np.random.randn(D_in, D_hidden)
w2 = np.random.randn(D_hidden, D_out)
learning_rate = 0.0001
for t in range(100):
    ### Forward pass
    h = x.dot(w1)  #50x40 and 40x100 produce 50x100
    h_relu = np.maximum(h, 0)  #this has to be np.maximum as it takes two input arrays and do element-wise max, 50x100
    y_pred = h_relu.dot(w2)  #50x100 and 100x10 produce 50x10
    #print y_pred.shape
    ### Loss function
    loss = 0.5 * np.sum(np.square(y_pred - y))
    ### Backward pass
    grad_y_pred = y_pred - y  #50x10
    grad_w2 = h_relu.T.dot(grad_y_pred)  #50x100 and 50x10 should produce 100x10, so transpose h_relu
    grad_h_relu = grad_y_pred.dot(w2.T)  #50x10 and 100x10 should produce 50x100, so transpose w2
    grad_h = grad_h_relu.copy()  #make a copy of the upstream gradient
    # ReLU backward: no gradient flows where the pre-activation h was negative.
    grad_h[h < 0] = 0
    grad_w1 = x.T.dot(grad_h)  #50x100 and 50x40 should produce 40x100
    # Plain SGD update.
    w1 = w1 - learning_rate * grad_w1
    w2 = w2 - learning_rate * grad_w2
import torch
# Same two-layer ReLU network as the NumPy version above, with the forward
# and backward passes hand-written using torch tensor operations.
N, D_in, D_hidden, D_out = 50, 40, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
w1 = torch.randn(D_in, D_hidden)
w2 = torch.randn(D_hidden, D_out)
learning_rate = 0.0001
for t in range(100):
    # Forward pass.
    h = x.mm(w1)             # (50,40) @ (40,100) -> (50,100)
    h_relu = h.clamp(min=0)  # element-wise ReLU, same as np.maximum(h, 0)
    y_pred = h_relu.mm(w2)   # (50,100) @ (100,10) -> (50,10)
    # Loss: 0.5 * sum of squared errors.
    loss = 0.5 * (y_pred - y).pow(2).sum()
    # Backward pass (manual gradients).
    grad_y_pred = y_pred - y              # (50,10)
    grad_w2 = h_relu.t().mm(grad_y_pred)  # (100,50) @ (50,10) -> (100,10)
    grad_h_relu = grad_y_pred.mm(w2.t())  # (50,10) @ (10,100) -> (50,100)
    grad_h = grad_h_relu.clone()          # copy before masking in place
    # BUG FIX: the ReLU backward must zero the gradient where the
    # *pre-activation* h was negative, not where the gradient itself is
    # negative (the NumPy version above does this correctly: grad_h[h < 0] = 0).
    grad_h[h < 0] = 0
    grad_w1 = x.t().mm(grad_h)            # (40,50) @ (50,100) -> (40,100)
    # Plain SGD update.
    w1 = w1 - learning_rate * grad_w1
    w2 = w2 - learning_rate * grad_w2
import torch
from torch.autograd import Variable, backward

# Same network, but the gradients are computed by autograd instead of by hand.
N, D_in, D_hidden, D_out = 50, 40, 100, 10
# Inputs and targets need no gradients; the two weight matrices do.
x = Variable(torch.randn(N, D_in), requires_grad=False)
y = Variable(torch.randn(N, D_out), requires_grad=False)
w1 = Variable(torch.randn(D_in, D_hidden), requires_grad=True)
w2 = Variable(torch.randn(D_hidden, D_out), requires_grad=True)
learning_rate = 0.0001
for t in range(100):
    # Forward pass and loss in one expression.
    y_pred = x.mm(w1).clamp(min=0).mm(w2)  #50x40 40x100 100x10 --> 50x10
    loss = 0.5 * (y_pred - y).pow(2).sum()
    # Autograd fills w1.grad and w2.grad.
    loss.backward()
    # Update through .data so the update itself is not tracked by autograd.
    w1.data -= learning_rate * w1.grad.data
    w2.data -= learning_rate * w2.grad.data
    # Gradients accumulate by default; reset them for the next iteration.
    w1.grad.data.zero_()
    w2.grad.data.zero_()
import tensorflow as tf
import numpy as np
# Same two-layer network expressed as a TensorFlow 1.x static graph:
# build the graph once, then run it repeatedly inside a Session.
N, D_in, D_hidden, D_out = 50, 40, 100, 10
x = tf.placeholder(tf.float32, shape=(None, D_in))
y = tf.placeholder(tf.float32, shape=(None, D_out))
w1 = tf.Variable(tf.random_normal((D_in, D_hidden)))
w2 = tf.Variable(tf.random_normal((D_hidden, D_out)))
# BUG FIX: the closing parenthesis was misplaced, so w2 was being passed as a
# third argument to tf.maximum instead of to the outer tf.matmul.
y_pred = tf.matmul(tf.maximum(tf.matmul(x, w1), tf.zeros(1)), w2)
loss = tf.reduce_sum((y - y_pred) ** 2)
# Symbolic gradients of the loss w.r.t. both weight matrices.
grad_w1, grad_w2 = tf.gradients(loss, [w1, w2])
learning_rate = 0.001
# Assign ops implementing the SGD update; running them applies the step.
new_w1, new_w2 = w1.assign(w1 - learning_rate * grad_w1), w2.assign(w2 - learning_rate * grad_w2)
# BUG FIX: tf.Session (capital S) — tf.session does not exist.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    x_value = np.random.randn(N, D_in)
    y_value = np.random.randn(N, D_out)
    # BUG FIX (portability): range, not the Python-2-only xrange.
    for i in range(100):
        loss_value, _, _ = sess.run([loss, new_w1, new_w2],
                                    feed_dict={x: x_value, y: y_value})
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A simple numpy implementation of one hidden layer neural network.
Step2: with very slight modifications, we could end up with the implementation of the same algorithm in PyTorch
Step3: Now with the autograd functionality in PyTorch, we could see the ease of doing backpropagation, calculating gradients for two layers networks is not a big deal but it becomes much more complicated when the number of layers grows.
Step4: Basic matrix multiplication in Pytorch
|
2,503
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import SVG
SVG(filename='mass_spring_damper.svg')

import sympy as sym
import sympy.physics.mechanics as me
from sympy.physics.vector import init_vprinting
init_vprinting()  # pretty vector printing for mechanics expressions

# Generalized coordinate x (position) and generalized speed v of the
# particle, plus the constants: mass, damping, stiffness, gravity, time.
x, v = me.dynamicsymbols('x v')
m, c, k, g, t = sym.symbols('m c k g t')

# Inertial (Newtonian) reference frame attached to the ceiling.
ceiling = me.ReferenceFrame('C')
# O: fixed attachment point in the ceiling; P: the moving particle.
O = me.Point('O')
P = me.Point('P')
O.set_vel(ceiling, 0)
P.set_pos(O, x * ceiling.x)
P.set_vel(ceiling, v * ceiling.x)
P.vel(ceiling)

# The three forces on the particle, all along ceiling.x:
# viscous damping, spring restoring force, and gravity.
damping = -c * P.vel(ceiling)
stiffness = -k * P.pos_from(O)
gravity = m * g * ceiling.x
forces = damping + stiffness + gravity
forces

# Newton's second law projected onto ceiling.x: 0 = F - m*a.
zero = me.dot(forces - m * P.acc(ceiling), ceiling.x)
zero
# First-order form: solve for dv/dt and pair it with dx/dt = v.
dv_by_dt = sym.solve(zero, v.diff(t))[0]
dx_by_dt = v
dv_by_dt, dx_by_dt

# The same equations of motion derived automatically via Kane's method.
mass = me.Particle('mass', P, m)
kane = me.KanesMethod(ceiling, q_ind=[x], u_ind=[v], kd_eqs=[v - x.diff(t)])
fr, frstar = kane.kanes_equations([(P, forces)], [mass])
fr, frstar
# Mass matrix M and forcing vector f of M*d[q,u]/dt = f(q,u).
M = kane.mass_matrix_full
f = kane.forcing_full
M, f
M.inv() * f

# Numerical simulation with pydy: plug in constants and initial conditions,
# then integrate over 10 seconds sampled at 100 points.
from pydy.system import System
sys = System(kane)
sys.constants = {m:10.0, g:9.8, c:5.0, k:10.0}
sys.initial_conditions = {x:0.0, v:0.0}
from numpy import linspace
sys.times = linspace(0.0, 10.0, 100)
x_trajectory = sys.integrate()

# 3D visualization: a sphere for the bob, a disc for the ceiling.
from pydy.viz import *
bob = Sphere(2.0, color="red", material="metal")
bob_vframe = VisualizationFrame(ceiling, P, bob)
ceiling_circle = Circle(radius=10, color="white", material="metal")
from numpy import pi
# Rotate the circle's frame so the circle's axis lines up with ceiling.x.
rotated = ceiling.orientnew("C_R", 'Axis', [pi / 2, ceiling.z])
ceiling_vframe = VisualizationFrame(rotated, O, ceiling_circle)
scene = Scene(ceiling, O, system=sys)
scene.visualization_frames = [bob_vframe, ceiling_vframe]
# Camera oriented so the x axis points downward on screen.
camera_frame = ceiling.orientnew('Camera Frame','Axis', [pi / 2, ceiling.z])
camera_point = O.locatenew('Camera Location', 100 * camera_frame.z)
primary_camera = PerspectiveCamera(camera_frame, camera_point)
scene.cameras = [primary_camera]
scene.display_ipython()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Start by loading in the core functionality of both SymPy and Mechanics.
Step2: We can make use of the pretty printing of our results by loading SymPy's printing extension, in particular we will use the vector printing which is nice for mechanics objects.
Step3: We'll start by defining the variables we will need for this problem
Step4: Now, we define a Newtonian reference frame that represents the ceiling which the particle is attached to, $C$.
Step5: We will need two points, one to represent the original position of the particle which stays fixed in the ceiling frame, $O$, and the second one, $P$ which is aligned with the particle as it moves.
Step6: The velocity of point $O$ in the ceiling is zero.
Step7: Point $P$ can move downward in the $y$ direction and its velocity is specified as $v$ in the downward direction.
Step8: There are three forces acting on the particle. Those due to the acceleration of gravity, the damper, and the spring.
Step9: Now we can use Newton's second law, $0=F-ma$, to form the equation of motion of the system.
Step10: We can then form the first order equations of motion by solving for $\frac{dv}{dt}$ and introducing the kinematical differential equation, $v=\frac{dx}{dt}$.
Step11: Forming the equations of motion can also be done with the automated methods available in the Mechanics package
Step12: Now we can construct a KanesMethod object by passing in the generalized coordinate, $x$, the generalized speed, $v$, and the kinematical differential equation which relates the two, $0=v-\frac{dx}{dt}$.
Step13: Now Kane's equations can be computed, and we can obtain $F_r$ and $F_r^*$.
Step14: The equations are also available in the form $M\frac{d}{dt}[q,u]^T=f(q, u)$ and we can extract the mass matrix, $M$, and the forcing functions, $f$.
Step15: Finally, we can form the first order differential equations of motion $\frac{d}{dt}[q,u]^T=M^{-1}f(\dot{u}, u, q)$, which is the same as previously found.
Step16: Simulating the system
Step17: Now, we specify the numerical values of the constants and the initial values of states in the form of a dict.
Step18: We must generate a time vector over which the integration will be carried out. NumPy's linspace is often useful for this.
Step19: The trajectory of the states over time can be found by calling the .integrate() method.
Step20: Visualizing the System
Step21: For visualizing the system, we need to create shapes for the objects we wish to visualize, and map each of them
Step22: Now create a circular disc that represents the ceiling and fix it to the ceiling reference frame. The circle's default axis is aligned with its local $y$ axis, so we need to attach it to a rotated ceiling reference frame if we want the circle's axis to align with the $\hat{c}_x$ unit vector.
Step23: Now we initialize a Scene. A Scene contains all the information required to visualize a System onto a canvas.
Step24: We provide the VisualizationFrames, which we want to visualize as a list to scene.
Step25: The default camera of Scene has the z axis of the base frame pointing out of the screen, and the y axis pointing up. We want the x axis to point downwards, so we supply a new camera that will achieve this.
Step26: Now, we call the display method.
|
2,504
|
<ASSISTANT_TASK:>
Python Code:
%%bash
pip freeze | grep tensor
!pip3 install tensorflow-hub==0.7.0
!pip3 install --upgrade tensorflow==1.15.3
!pip3 install google-cloud-bigquery==1.10
import os
import tensorflow as tf
import numpy as np
import tensorflow_hub as hub
import shutil
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# do not change these
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.15.3'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
categories_list = open("categories.txt").read().splitlines()
authors_list = open("authors.txt").read().splitlines()
content_ids_list = open("content_ids.txt").read().splitlines()
mean_months_since_epoch = 523
embedded_title_column = hub.text_embedding_column(
key="title",
module_spec="https://tfhub.dev/google/nnlm-de-dim50/1",
trainable=False)
content_id_column = tf.feature_column.categorical_column_with_hash_bucket(
key="content_id",
hash_bucket_size= len(content_ids_list) + 1)
embedded_content_column = tf.feature_column.embedding_column(
categorical_column=content_id_column,
dimension=10)
author_column = tf.feature_column.categorical_column_with_hash_bucket(key="author",
hash_bucket_size=len(authors_list) + 1)
embedded_author_column = tf.feature_column.embedding_column(
categorical_column=author_column,
dimension=3)
category_column_categorical = tf.feature_column.categorical_column_with_vocabulary_list(
key="category",
vocabulary_list=categories_list,
num_oov_buckets=1)
category_column = tf.feature_column.indicator_column(category_column_categorical)
months_since_epoch_boundaries = list(range(400,700,20))
months_since_epoch_column = tf.feature_column.numeric_column(
key="months_since_epoch")
months_since_epoch_bucketized = tf.feature_column.bucketized_column(
source_column = months_since_epoch_column,
boundaries = months_since_epoch_boundaries)
crossed_months_since_category_column = tf.feature_column.indicator_column(tf.feature_column.crossed_column(
keys = [category_column_categorical, months_since_epoch_bucketized],
hash_bucket_size = len(months_since_epoch_boundaries) * (len(categories_list) + 1)))
feature_columns = [embedded_content_column,
embedded_author_column,
category_column,
embedded_title_column,
crossed_months_since_category_column]
record_defaults = [["Unknown"], ["Unknown"],["Unknown"],["Unknown"],["Unknown"],[mean_months_since_epoch],["Unknown"]]
column_keys = ["visitor_id", "content_id", "category", "title", "author", "months_since_epoch", "next_content_id"]
label_key = "next_content_id"
def read_dataset(filename, mode, batch_size = 512):
    """Build an input_fn that streams (features, label) batches from CSV files.

    Args:
      filename: glob pattern of the CSV file(s) to read.
      mode: tf.estimator.ModeKeys; TRAIN shuffles and repeats indefinitely,
        any other mode does a single pass over the data.
      batch_size: number of examples per batch.

    Returns:
      A zero-argument input_fn suitable for TrainSpec/EvalSpec.
    """
    def _input_fn():
        def decode_csv(value_column):
            # Parse one CSV line into a dict of feature tensors plus the label.
            columns = tf.decode_csv(value_column, record_defaults=record_defaults)
            features = dict(zip(column_keys, columns))
            label = features.pop(label_key)
            return features, label

        # Create list of files that match pattern
        file_list = tf.io.gfile.glob(filename)

        # Create dataset from file list: read each line, parse it into
        # (features, label).  (This fills in the previous TODO placeholder,
        # which was a syntax error.)
        dataset = tf.data.TextLineDataset(file_list).map(decode_csv)

        if mode == tf.estimator.ModeKeys.TRAIN:
            num_epochs = None  # repeat indefinitely
            dataset = dataset.shuffle(buffer_size = 10 * batch_size)
        else:
            num_epochs = 1  # end-of-input after one pass

        dataset = dataset.repeat(num_epochs).batch(batch_size)
        return dataset.make_one_shot_iterator().get_next()
    return _input_fn
def model_fn(features, labels, mode, params):
    """Custom tf.estimator model_fn for next-content-id classification.

    Args:
      features: dict of input feature tensors produced by the input_fn.
      labels: string tensor of next_content_id values (None in PREDICT mode).
      mode: tf.estimator.ModeKeys (TRAIN / EVAL / PREDICT).
      params: dict with 'feature_columns', 'hidden_units', 'n_classes'.

    Returns:
      A tf.estimator.EstimatorSpec for the requested mode.
    """
    # Dense feed-forward tower over the concatenated feature columns.
    net = tf.feature_column.input_layer(features, params['feature_columns'])
    for units in params['hidden_units']:
        net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
    # Compute logits (1 per class).
    logits = tf.layers.dense(net, params['n_classes'], activation=None)

    predicted_classes = tf.argmax(logits, 1)
    # Map predicted class indices back to content-id strings.
    from tensorflow.python.lib.io import file_io
    with file_io.FileIO('content_ids.txt', mode='r') as ifp:
        content = tf.constant([x.rstrip() for x in ifp])
    predicted_class_names = tf.gather(content, predicted_classes)

    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes[:, tf.newaxis],
            'class_names' : predicted_class_names[:, tf.newaxis],
            'probabilities': tf.nn.softmax(logits),
            'logits': logits,
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    # Convert string labels to integer ids via the vocabulary file.
    table = tf.contrib.lookup.index_table_from_file(vocabulary_file="content_ids.txt")
    labels = table.lookup(labels)

    # Compute loss.
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    # Compute evaluation metrics.  (This fills in the previous TODO
    # placeholder, which was a syntax error.)
    accuracy = tf.metrics.accuracy(labels=labels,
                                   predictions=predicted_classes)
    top_10_accuracy = tf.metrics.mean(tf.nn.in_top_k(predictions=logits,
                                                     targets=labels,
                                                     k=10))
    metrics = {
        'accuracy': accuracy,
        'top_10_accuracy' : top_10_accuracy}
    tf.summary.scalar('accuracy', accuracy[1])
    tf.summary.scalar('top_10_accuracy', top_10_accuracy[1])

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(
            mode, loss=loss, eval_metric_ops=metrics)

    # Create training op.
    assert mode == tf.estimator.ModeKeys.TRAIN
    optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Train/evaluate the custom estimator from a fresh model directory.
outdir = 'content_based_model_trained'
shutil.rmtree(outdir, ignore_errors = True) # start fresh each time
#tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
estimator = tf.estimator.Estimator(
    model_fn=model_fn,
    model_dir = outdir,
    params={
        'feature_columns': feature_columns,
        'hidden_units': [200, 100, 50],
        'n_classes': len(content_ids_list)
    })

# Provide input data for training.  (This fills in the previous TODO
# placeholder, which was a syntax error: stream shuffled, repeated batches
# from the training CSV.)
train_spec = tf.estimator.TrainSpec(
    input_fn = read_dataset("training_set.csv", tf.estimator.ModeKeys.TRAIN),
    max_steps = 2000)

eval_spec = tf.estimator.EvalSpec(
    input_fn = read_dataset("test_set.csv", tf.estimator.ModeKeys.EVAL),
    steps = None,
    start_delay_secs = 30,
    throttle_secs = 60)

tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
%%bash
head -5 training_set.csv > first_5.csv
head first_5.csv
awk -F "\"*,\"*" '{print $2}' first_5.csv > first_5_content_ids
output = list(estimator.predict(input_fn=read_dataset("first_5.csv", tf.estimator.ModeKeys.PREDICT)))
import numpy as np
recommended_content_ids = [np.asscalar(d["class_names"]).decode('UTF-8') for d in output]
content_ids = open("first_5_content_ids").read().splitlines()
from google.cloud import bigquery

# NOTE(review): the notebook export stripped the triple quotes around these
# two SQL query strings; they are restored here so the cell is valid Python.
# Each query maps a content id (custom dimension 10) to its article title
# (custom dimension 6).
recommended_title_sql = """
#standardSQL
SELECT
(SELECT MAX(IF(index=6, value, NULL)) FROM UNNEST(hits.customDimensions)) AS title
FROM `cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) = \"{}\"
LIMIT 1""".format(recommended_content_ids[0])

current_title_sql = """
#standardSQL
SELECT
(SELECT MAX(IF(index=6, value, NULL)) FROM UNNEST(hits.customDimensions)) AS title
FROM `cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) = \"{}\"
LIMIT 1""".format(content_ids[0])

# Run both lookups and compare the article currently being read with the
# model's recommendation.
recommended_title = bigquery.Client().query(recommended_title_sql).to_dataframe()['title'].tolist()[0].encode('utf-8').strip()
current_title = bigquery.Client().query(current_title_sql).to_dataframe()['title'].tolist()[0].encode('utf-8').strip()
print("Current title: {} ".format(current_title))
print("Recommended title: {}".format(recommended_title))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's make sure you install the necessary version of tensorflow-hub. After doing the pip install below, click "Restart the kernel" on the notebook so that the Python environment picks up the new packages.
Step2: Note
Step3: Build the feature columns for the model
Step4: In the cell below you'll define the feature columns to use in your model. If necessary, remind yourself the various feature columns to use.
Step5: Create the input function
Step6: Create the model and train/evaluate
Step7: Train and Evaluate
Step8: This takes a while to complete but in the end, you will get about 30% top 10 accuracies.
Step9: Recall, to make predictions on the trained model you pass a list of examples through the input function. Complete the code below to make predictions on the examples contained in the "first_5.csv" file you created above.
Step12: Finally, you map the content id back to the article title. Let's compare your model's recommendation for the first example. This can be done in BigQuery. Look through the query below and make sure it is clear what is being returned.
|
2,505
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import numpy as np
import mne
from mne import io
from mne.stats import permutation_t_test
from mne.datasets import sample
print(__doc__)
# Paths to the sample dataset's filtered raw recording and its event file.
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'
event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif'

# Epoching parameters: event type 1, window from -200 ms to +500 ms.
event_id = 1
tmin = -0.2
tmax = 0.5

# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)

# pick MEG Gradiometers (EOG is kept for artifact rejection)
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
                       exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
data = epochs.get_data()
times = epochs.times

# Average each epoch over the 40-60 ms window, giving one value per sensor.
temporal_mask = np.logical_and(0.04 <= times, times <= 0.06)
data = np.mean(data[:, :, temporal_mask], axis=2)

# Permutation t-test across epochs: one test per sensor.
n_permutations = 50000
T0, p_values, H0 = permutation_t_test(data, n_permutations, n_jobs=1)

# Sensors whose permutation p-value is at most 0.05.
significant_sensors = picks[p_values <= 0.05]
significant_sensors_names = [raw.ch_names[k] for k in significant_sensors]

print("Number of significant sensors : %d" % len(significant_sensors))
print("Sensors names : %s" % significant_sensors_names)

# Topographic map of -log10(p), highlighting the significant sensors.
evoked = mne.EvokedArray(-np.log10(p_values)[:, np.newaxis],
                         epochs.info, tmin=0.)

# Extract mask and indices of active sensors in the layout
stats_picks = mne.pick_channels(evoked.ch_names, significant_sensors_names)
mask = p_values[:, np.newaxis] <= 0.05

evoked.plot_topomap(ch_type='grad', times=[0], scalings=1,
                    time_format=None, cmap='Reds', vmin=0., vmax=np.max,
                    units='-log10(p)', cbar_fmt='-%0.1f', mask=mask,
                    size=3, show_names=lambda x: x[4:] + ' ' * 20,
                    time_unit='s')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: View location of significantly active sensors
|
2,506
|
<ASSISTANT_TASK:>
Python Code:
from sys import version
print(version)
from typing import List, Any, TypeVar
T = TypeVar("T")
tableau1 = [5, 4, 1, 2, 3]
tableau2 = [1, 1, 2, 3] # avec un doublon
def selection_naive(tableau: List[T], k: int) -> T:
    """Select the k-th smallest element of ``tableau`` (1-indexed), naively.

    Repeatedly removes one occurrence of the minimum and recurses with k-1.
    (The notebook export had stripped the triple quotes of this docstring.)

    - Memory complexity: O(n)
    - Time complexity:   O(k n)
    """
    assert 1 <= k <= len(tableau), f"Erreur : k = {k} doit être entre 1 et n = {len(tableau)} = len(tableau)"
    min_tableau = min(tableau)
    if k == 1:  # base case: the smallest element is the answer
        return min_tableau
    # Copy the array and drop exactly ONE occurrence of the minimum
    # (list.remove deletes only the first occurrence), then recurse
    # for the (k-1)-th smallest of the remaining n-1 elements.
    nouveau_tableau = list(tableau)
    nouveau_tableau.remove(min_tableau)
    return selection_naive(nouveau_tableau, k - 1)
print(f"Pour le tableau {tableau1} :")
for k in range(1, 1 + len(tableau1)):
print(f" Le {k}-ième élément du tableau est {selection_naive(tableau1, k)}")
print(f"Pour le tableau {tableau2} :")
for k in range(1, 1 + len(tableau2)):
print(f" Le {k}-ième élément du tableau est {selection_naive(tableau2, k)}")
def selection_par_tri(tableau: List[T], k: int) -> T:
    """Select the k-th smallest element of ``tableau`` (1-indexed) by sorting.

    (The notebook export had stripped the triple quotes of this docstring.)

    - Memory complexity: O(n) for the sorted copy
    - Time complexity:   O(n log n)
    """
    # Bounds check, consistent with selection_naive.
    assert 1 <= k <= len(tableau), f"Erreur : k = {k} doit être entre 1 et n = {len(tableau)} = len(tableau)"
    # sorted() returns a new list, so the input is left untouched.
    return sorted(tableau)[k - 1]
print(f"Pour le tableau {tableau1} :")
for k in range(1, 1 + len(tableau1)):
print(f" Le {k}-ième élément du tableau est {selection_par_tri(tableau1, k)}")
print(f"Pour le tableau {tableau2} :")
for k in range(1, 1 + len(tableau2)):
print(f" Le {k}-ième élément du tableau est {selection_par_tri(tableau2, k)}")
def mediane_cinq_valeurs(tableau: List[T]) -> T:
    """Return the median of an array of one to five values.

    For even lengths this is the upper median (index len//2 of the
    sorted values).
    """
    assert 1 <= len(tableau) <= 5, f"Erreur : tableau a longueur = {len(tableau)} mais devrait etre 1 <= .. <= 5"
    copie = list(tableau)
    copie.sort()
    milieu = len(copie) // 2
    return copie[milieu]
assert 3 == mediane_cinq_valeurs([1, 2, 3, 4, 5])
assert 3 == mediane_cinq_valeurs([5, 1, 4, 2, 3])
mediane_cinq_valeurs([1, 2, 3, 4])
mediane_cinq_valeurs([1, 2, 4])
tableau3 = [1,2,3,4,5, 6,7,8,9,10, 11,12,13]
# On va le découper comme ça :
[
[1,2,3,4,5],
[6,7,8,9,10],
[11,12,13],
]
[
tableau3[i:i+5]
for i in range(0, len(tableau3), 5)
]
[
mediane_cinq_valeurs(tableau3[i:i+5])
for i in range(0, len(tableau3), 5)
]
def mediane_des_medianes(tableau: List[T]) -> T:
    """Median-of-medians: an approximate median, computed recursively.

    Splits the array into groups of (at most) five, takes each group's
    median, and recurses on the array of those medians.

    - Memory complexity: O(n)
    - Time complexity:   O(n), since T(n) = O(n) + T(n/5) (case (i) of the
      Master Theorem for n = 5^k).
    """
    # BUG FIX: the original assert message referenced an undefined name `k`
    # (k is not a parameter here), which would raise NameError on failure.
    assert len(tableau) >= 1, "Erreur : tableau ne doit pas être vide"

    def _mediane5(groupe: List[T]) -> T:
        # Median of at most five values (upper median for even lengths).
        # BUG FIX: the original called the misspelled sibling
        # `mediane_cinq_valeur` (missing 's'), raising NameError for n > 5;
        # the tiny computation is inlined here instead.
        return sorted(groupe)[len(groupe) // 2]

    n = len(tableau)
    if n <= 5:  # O(1) base case
        return _mediane5(tableau)
    # At most 1 + n/5 sub-groups of at most five elements each.
    medianes = [_mediane5(tableau[i:i + 5]) for i in range(0, n, 5)]
    # One recursive call on an array of size ~n/5.
    return mediane_des_medianes(medianes)
mediane_des_medianes(tableau3)
def selection_par_medianes(tableau: List[T], k: int) -> T:
    """Select the k-th smallest element (1-indexed) using the
    median-of-medians pivot.

    - Memory complexity: O(n)
    - Time complexity:   O(n) worst case, since the pivot guarantees a
      constant-fraction split at every level.
    """
    assert 1 <= k <= len(tableau), f"Erreur : k = {k} doit être entre 1 et n = {len(tableau)} = len(tableau)"
    if len(tableau) == 1:
        return tableau[0]
    # Approximate median as the pivot value.
    pivot_val = mediane_des_medianes(tableau)
    # Three-way partition around the pivot value.
    # BUG FIX: the previous version compared k-1 against the pivot's
    # *position in the unsorted array* (tableau.index(mediane)), which is
    # meaningless, and recursed without adjusting k — it asserted out on
    # e.g. ([5, 4, 1, 2, 3], k=3).  Ranks must be compared against the
    # partition sizes, and k shifted when recursing into the upper part.
    plus_petits = [x for x in tableau if x < pivot_val]
    egaux = [x for x in tableau if x == pivot_val]
    if k <= len(plus_petits):
        return selection_par_medianes(plus_petits, k)
    if k <= len(plus_petits) + len(egaux):
        return pivot_val
    plus_grands = [x for x in tableau if x > pivot_val]
    return selection_par_medianes(plus_grands, k - len(plus_petits) - len(egaux))
print(f"Pour le tableau {tableau1} :")
for k in range(1, 1 + len(tableau1)):
print(f" Le {k}-ième élément du tableau est {selection_par_medianes(tableau1, k)}")
print(f"Pour le tableau {tableau2} :")
for k in range(1, 1 + len(tableau2)):
print(f" Le {k}-ième élément du tableau est {selection_par_medianes(tableau2, k)}")
def swap(tableau: "List[T]", i: int, j: int) -> None:
    """Exchange tableau[i] and tableau[j] in place, in constant time."""
    tableau[i], tableau[j] = tableau[j], tableau[i]
def partition5(tableau: "List[T]", left: int, right: int) -> int:
    """Insertion-sort tableau[left..right] in place (intended for at most
    five elements) and return the index of its median, i.e. the middle
    position of the range.
    """
    print(f"DEBUG: partition5({tableau}, {left}, {right})")
    i = left + 1
    while i <= right:
        j = i
        # Sift tableau[i] down to its place among tableau[left..i-1].
        while j > left and tableau[j-1] > tableau[j]:
            swap(tableau, j-1, j)
            j = j - 1
        i = i + 1
    return (left + right) // 2
# Quick check: insertion-sort five values in place and return the middle index (2).
t = [5, 3, 2, 1, 4]
print(partition5(t, 0, len(t) - 1))
print(t)  # now sorted in place: [1, 2, 3, 4, 5]
def pivot(tableau: List[T], left: int, right: int) -> int:
    """Choose a pivot index for tableau[left..right] in O(n), using the
    median-of-medians strategy (mutually recursive with `selection`).
    """
    print(f"DEBUG: pivot({tableau}, {left}, {right})")
    if right - left + 1 <= 5:  # <= 5 elements
        return partition5(tableau, left, right)
    # Otherwise, move the medians of the five-element sub-arrays to the
    # first n/5 positions of the range.
    for i in range(left, right + 1, 5):
        # Compute the position of the median of the i-th sub-group:
        sub_right = i + 4
        if sub_right > right:
            sub_right = right
        median5 = partition5(tableau, i, sub_right)
        new_pos = left + (i - left)//5
        swap(tableau, median5, new_pos)
    # Median of the n/5 medians of the sub-groups.
    mid = (right - left) // 10 + left + 1
    return selection(tableau, mid, left=left, right=left + (right - left) // 5)
from typing import Optional
def selection(tableau: List[T], k: int, left: Optional[int] = None, right: Optional[int] = None) -> int:
    """Linear-time selection: return the index at which the k-th smallest
    element of tableau[left..right] ends up.

    `tableau` is reordered in place; k is an absolute 0-based index into
    `tableau` (not a 1-based rank).
    """
    if left is None: left = 0
    if right is None: right = len(tableau) - 1
    print(f"DEBUG: selection({tableau}, {k}, {left}, {right})")
    while True:
        if left == right:
            return left
        pivot_index = pivot(tableau, left, right)
        pivot_index = partition(tableau, left, right, pivot_index, k)
        if k == pivot_index:
            return k  # already at the right position
        elif k < pivot_index:
            right = pivot_index - 1
        else:
            left = pivot_index + 1
def partition(tableau: List[T], left: int, right: int, pivot_index: int, k: int) -> int:
    """Three-way partition of tableau[left..right] around tableau[pivot_index].

    After the call, elements < pivot come first, then elements == pivot,
    then elements > pivot.  Returns an index `selection` can compare with k:
    k itself when position k is known to hold its final value, otherwise the
    boundary of the group containing rank k.
    """
    print(f"DEBUG: partition({tableau}, {left}, {right}, {pivot_index}, {k})")
    pivot_value = tableau[pivot_index]
    swap(tableau, pivot_index, right)  # move the pivot to the end
    store_index = left
    # Move every element smaller than the pivot to its left.
    for i in range(left, right):
        if tableau[i] < pivot_value:
            swap(tableau, store_index, i)
            store_index = store_index + 1
    # Move every element equal to the pivot just after the smaller ones.
    store_index_eq = store_index
    for i in range(store_index, right):
        if tableau[i] == pivot_value:
            swap(tableau, store_index_eq, i)
            store_index_eq = store_index_eq + 1
    # Move the pivot to its final position in the middle.
    swap(tableau, right, store_index_eq)
    # Return the pivot location, taking the requested position k into account.
    if k < store_index:
        return store_index  # k is in the group of smaller elements
    # Fixed: positions store_index..store_index_eq all hold the pivot value,
    # so any k in that whole range is final (the original only accepted
    # k == store_index_eq, causing needless extra iterations in `selection`).
    elif k <= store_index_eq:
        return k  # k is in the group equal to the pivot
    else:
        return store_index_eq  # k is in the group of larger elements
# Demo of `selection` on copies of the arrays (selection reorders its input in place).
# NOTE(review): `selection` returns an *index* into the reordered copy, not the
# element itself, and here k runs up to len(tab) while the function expects a
# 0-based index -- the printed values should be double-checked against the intent.
print(f"Pour le tableau {tableau1} :")
for k in range(1, 1 + len(tableau1)):
    print(f"    Le {k}-ième élément du tableau est {selection(tableau1[:], k)}")
for tab in (
    [1, 2, 3, 4, 5],
    [1, 2, 3, 4],
    # [1, 2, 3] * 10,
):
    print(f"Pour le tableau {tab} :")
    for k in range(1, 1 + len(tab)):
        print(f"    Le {k}-ième élément du tableau est {selection(tab[:], k)}")
# Same demo on tableau2 and its reverse.
for tab in (
    tableau2,
    tableau2[::-1],
):
    print(f"Pour le tableau {tab} :")
    for k in range(1, 1 + len(tab)):
        print(f"    Le {k}-ième élément du tableau est {selection(tab[:], k)}")
def find_i_th_smallest(tableau: "List[T]", k: "Optional[int]" = None, items_per_column: int = 5) -> "T":
    """Return the k-th smallest element of `tableau` (k is a 0-based rank;
    when k is None, return the lower median), via median of medians.

    `items_per_column` is the group size used for the column medians
    (should be odd, e.g. 5 or 15).
    """
    if k is None:
        k = (len(tableau) - 1) // 2
    n = len(tableau)
    if n <= items_per_column:
        # Small enough: just sort and pick the k-th element directly.
        return sorted(tableau)[k]
    else:
        # 1. partition A into columns of items_per_column items each. items_per_column is odd, say 15.
        # 2. find the median of every column
        # 3. put all medians in a new list, say, called medians
        # Fixed: the recursive calls now propagate items_per_column
        # (the original silently fell back to the default group size of 5).
        medians = [
            find_i_th_smallest(tableau[j : (j + items_per_column)],
                               items_per_column=items_per_column)
            for j in range(0, n, items_per_column)
        ]
        # 4. find M, the median of the medians (k=None -> middle value)
        median_of_medians = find_i_th_smallest(medians, items_per_column=items_per_column)
        # 5. split A into 3 parts by M, { < M }, { == M }, and { > M }
        # 6. find which above set has tableau's k-th smallest, recursively.
        lefts = [ x for x in tableau if x < median_of_medians ]
        if k < len(lefts):
            return find_i_th_smallest(lefts, k, items_per_column)
        rights = [ x for x in tableau if x > median_of_medians ]
        len_rights = len(rights)
        if k < (n - len_rights):
            return median_of_medians
        return find_i_th_smallest(rights, k - (n - len_rights), items_per_column)
# Optional progress bars: fall back to a no-op pass-through when tqdm is absent.
try:
    from tqdm.notebook import tqdm
except ImportError:
    def tqdm(iterator, **kwargs): return iterator
# Randomized stress test + timing of find_i_th_smallest vs. sorted()
# (IPython cell: the %timeit lines are IPython magics, not plain Python).
# NOTE(review): relies on the `random` module being imported earlier in the notebook.
number_of_tests = 1
# How many numbers should be randomly generated for testing?
for number_of_numbers in tqdm([100, 1000, 10000]):
    print(f"Pour des listes de taille {number_of_numbers} :")
    for test in tqdm(range(number_of_tests)):
        print(f"Test {test + 1} / {number_of_tests} avec une liste de taille {number_of_numbers} :")
        # create a list of random positive integers
        tableau = [ random.randint(0, number_of_numbers) for i in range(0, number_of_numbers) ]
        for L in [ tableau, tableau*2 ]:
            sorted_L = sorted(L)
            # Exhaustive correctness check against the sorted reference.
            for k in range(len(L)):
                assert sorted_L[k] == find_i_th_smallest(L, k)
            %timeit -n1 sorted(L)[random.randint(0, len(L) - 1)]
            %timeit -n1 find_i_th_smallest(L, random.randint(0, len(L) - 1))
# Same stress test + timing on larger lists (IPython cell with %timeit magics).
number_of_tests = 1
# How many numbers should be randomly generated for testing?
for number_of_numbers in tqdm([500, 5000, 50000, 100000]):
    print(f"Pour des listes de taille {number_of_numbers} :")
    for test in tqdm(range(number_of_tests)):
        print(f"Test {test + 1} / {number_of_tests} avec une liste de taille {number_of_numbers} :")
        # create a list of random positive integers
        tableau = [ random.randint(0, number_of_numbers) for i in range(0, number_of_numbers) ]
        for L in [ tableau, tableau*2 ]:
            sorted_L = sorted(L)
            # Exhaustive correctness check against the sorted reference.
            for k in range(len(L)):
                assert sorted_L[k] == find_i_th_smallest(L, k)
            %timeit -n1 sorted(L)[random.randint(0, len(L) - 1)]
            %timeit -n1 find_i_th_smallest(L, random.randint(0, len(L) - 1))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: J'ai pris l'habitude d'écrire des signatures de fonctions en python qui soient typées
Step2: Je ferai les premiers exemples avec ces deux tableaux
Step4: Solution naïve en temps quadratique
Step5: Deux exemples
Step7: Solution naïve en temps quasi-linéaire (O(n log n))
Step8: Deux exemples
Step9: Méthode optimale
Step10: Que se passe-t-il si la liste n'est pas de taille impaire ?
Step11: Découper le tableau en sous-tableaux
Step12: Cela est très rapide avec des listes par compréhension
Step13: Et donc si on calcule les médianes
Step15: Médiane des médianes par une approche récursive
Step16: Par exemple, la médiane des médianes du tableau d'exemple est 8 car son tableau des médianes des sous-tableaux de taille <= 5 est [3, 8, 12]
Step18: Sélection par médianes
Step19: Deux exemples
Step21: Autre approche
Step23: Avec un petit tri par insertion sur cinq éléments (max cinq), on peut trouver la médiane d'un petit tableau en temps constant
Step24: Par exemple
Step26: Le calcul du pivot fera un appel à selection, qui lui-même appelle pivot (par récursion mutuelle)
Step28: La sélection utilisera la fonction pivot et la partition
Step29: Et enfin la fonction de partition
Step30: Deux exemples
Step31: Autre implémentation
Step32: On peut faire quelques tests
|
2,507
|
<ASSISTANT_TASK:>
Python Code:
from pyannote.core import Segment
# start time in seconds
s = 1.
# end time in seconds
e = 9.
# A Segment describes a temporal fragment [start, end], in seconds.
segment = Segment(start=s, end=e)
segment
# A Segment unpacks like a 2-tuple (start, end).
start, end = segment
# Fixed: these used Python 2 print statements (a SyntaxError under Python 3,
# which the rest of this file targets -- it uses f-strings elsewhere).
print('from %f to %f' % (start, end))
print('Segment %s ends at %g seconds.' % (segment, segment.end))
print('Its duration is %g seconds.' % (segment.duration))
print('Its middle stands as %g seconds.' % (segment.middle))
# Intersection: Segment.intersects tests overlap; `&` returns the intersection.
other_segment = Segment(4, 13)
if segment.intersects(other_segment):
    # Fixed: converted Python 2 print statements to Python 3 print() calls.
    print('Segment %s intersects other segment %s.' % (segment, other_segment))
    intersection = segment & other_segment
    print('Their intersection is %s.' % (str(intersection)))
notebook.crop = Segment(0, 15)  # restrict notebook rendering to [0, 15]
segment
other_segment
segment & other_segment
other_segment = Segment(13, 20)
# An empty intersection is falsy.
if not (segment & other_segment):
    print('Those two segments do not intersect.')
notebook.crop = Segment(0, 30)
segment
other_segment
# Inclusion: `in` tests whether one segment is contained in another.
other_segment = Segment(5, 6)
if other_segment in segment:
    # Fixed typo in the message: "in included" -> "is included".
    print('%s is included in %s' % (other_segment, segment))
t = 23.
if not segment.overlaps(t):
    print('%s does not contain time %f' % (segment, t))
# Other operations: union (|) and gap (^).
other_segment = Segment(10, 30)
print('Union of %s and %s is %s' % (segment, other_segment, segment | other_segment))
other_segment = Segment(14, 15)
print('Gap between %s and %s is %s' % (segment, other_segment, segment ^ other_segment))
from pyannote.core import SlidingWindow, Timeline
notebook.crop = Segment(0, 10)
# Sliding window: fixed-duration windows every `step` seconds over [start, end].
window = SlidingWindow(start=0.0, step=1.1, duration=2., end=10.)
Timeline(window)  # display the window positions as a timeline
interval = Segment(3., 7.5)
interval
# Cropping: indices of window positions w.r.t. the interval, in three modes.
# 'strict' keeps only positions fully contained in the interval.
indices = window.crop(interval, mode='strict')
Timeline(window[i] for i in indices)
# 'loose' keeps any intersecting position.
indices = window.crop(interval, mode='loose')
Timeline(window[i] for i in indices)
# 'center' centers the first and last positions on the interval boundaries.
indices = window.crop(interval, mode='center')
Timeline(window[i] for i in indices)
# Built-in help on the Segment class.
help(Segment)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Segment instances are used to describe temporal fragments (e.g. of an audio file).
Step2: Segment instances are nothing more than 2-tuples augmented with several useful methods and properties.
Step3: Intersection
Step4: Inclusion
Step5: Other operations
Step6: SlidingWindow (pyannote.core.segment.SlidingWindow)
Step7: Iteration
Step8: Cropping
Step9: strict mode only returns fully contained positions.
Step10: loose mode returns any intersecting position.
Step11: center mode centers first and last position on interval boundaries.
Step12: Need help?
|
2,508
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
# Default figure size and font size for all plots in this notebook.
plt.rc("figure", figsize=(16,8))
plt.rc("font", size=14)
# First we'll simulate the synthetic data
def simulate_seasonal_term(periodicity, total_cycles, noise_std=1.,
                           harmonics=None):
    """Simulate a stochastic trigonometric seasonal component.

    Each harmonic j evolves by the usual pair of recursions
        gamma_j(t+1)  =  cos(l*j)*gamma_j(t) + sin(l*j)*gamma*_j(t) + noise
        gamma*_j(t+1) = -sin(l*j)*gamma_j(t) + cos(l*j)*gamma*_j(t) + noise
    with l = 2*pi/periodicity.  The process is simulated for 100x the
    requested duration and only the tail is returned (burn-in discarded).
    """
    duration = periodicity * total_cycles
    assert duration == int(duration)
    duration = int(duration)
    if not harmonics:
        harmonics = int(np.floor(periodicity / 2))
    base_frequency = 2 * np.pi / float(periodicity)
    # Initial harmonic states (gamma and its companion gamma*).
    state = noise_std * np.random.randn(harmonics)
    state_star = noise_std * np.random.randn(harmonics)
    n_steps = 100 * duration  # pad for burn-in
    series = np.zeros(n_steps)
    for t in range(n_steps):
        next_state = np.zeros_like(state)
        next_state_star = np.zeros_like(state_star)
        for j in range(1, harmonics + 1):
            cos_j = np.cos(base_frequency * j)
            sin_j = np.sin(base_frequency * j)
            next_state[j - 1] = (state[j - 1] * cos_j
                                 + state_star[j - 1] * sin_j
                                 + noise_std * np.random.randn())
            next_state_star[j - 1] = (- state[j - 1] * sin_j
                                      + state_star[j - 1] * cos_j
                                      + noise_std * np.random.randn())
        series[t] = next_state.sum()
        state, state_star = next_state, next_state_star
    return series[-duration:]  # discard the burn-in prefix
# Build a synthetic series: a period-10 seasonal term with 3 harmonics (std 2),
# a period-100 seasonal term with 2 harmonics (std 3), and a constant level of 10.
duration = 100 * 3
periodicities = [10, 100]
num_harmonics = [3, 2]
std = np.array([2, 3])
np.random.seed(8678309)
terms = []
for ix, _ in enumerate(periodicities):
    s = simulate_seasonal_term(
        periodicities[ix],
        duration / periodicities[ix],
        harmonics=num_harmonics[ix],
        noise_std=std[ix])
    terms.append(s)
terms.append(np.ones_like(terms[0]) * 10.)  # constant level term
series = pd.Series(np.sum(terms, axis=0))
df = pd.DataFrame(data={'total': series,
                        '10(3)': terms[0],
                        '100(2)': terms[1],
                        'level':terms[2]})
# Plot the total and each individual component.
h1, = plt.plot(df['total'])
h2, = plt.plot(df['10(3)'])
h3, = plt.plot(df['100(2)'])
h4, = plt.plot(df['level'])
plt.legend(['total','10(3)','100(2)', 'level'])
plt.show()
# Model 1: fixed intercept + two frequency-domain seasonal components
# (period 10 / 3 harmonics, period 100 / 2 harmonics) -- matches the truth.
model = sm.tsa.UnobservedComponents(series.values,
                                    level='fixed intercept',
                                    freq_seasonal=[{'period': 10,
                                                    'harmonics': 3},
                                                   {'period': 100,
                                                    'harmonics': 2}])
res_f = model.fit(disp=False)
print(res_f.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_f.smoother_results.smoothed_state[0,-1:][0]))
res_f.plot_components()
plt.show()
# State-space transition matrix of the fitted model (time index 0).
model.ssm.transition[:, :, 0]
# Model 2: mixed domain -- a time-domain seasonal (period 10) plus a
# frequency-domain seasonal (period 100, 2 harmonics).
model = sm.tsa.UnobservedComponents(series,
                                    level='fixed intercept',
                                    seasonal=10,
                                    freq_seasonal=[{'period': 100,
                                                    'harmonics': 2}])
res_tf = model.fit(disp=False)
print(res_tf.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_tf.smoother_results.smoothed_state[0,-1:][0]))
fig = res_tf.plot_components()
fig.tight_layout(pad=1.0)
# Model 3: "lazy" frequency-domain -- one period-100 seasonal component with
# the default (full) set of harmonics.
model = sm.tsa.UnobservedComponents(series,
                                    level='fixed intercept',
                                    freq_seasonal=[{'period': 100}])
res_lf = model.fit(disp=False)
print(res_lf.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_lf.smoother_results.smoothed_state[0,-1:][0]))
fig = res_lf.plot_components()
fig.tight_layout(pad=1.0)
# Model 4: "lazy" time-domain -- a single time-domain seasonal of period 100.
model = sm.tsa.UnobservedComponents(series,
                                    level='fixed intercept',
                                    seasonal=100)
res_lt = model.fit(disp=False)
print(res_lt.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_lt.smoother_results.smoothed_state[0,-1:][0]))
fig = res_lt.plot_components()
fig.tight_layout(pad=1.0)
# Assign better names for our seasonal terms
true_seasonal_10_3 = terms[0]
true_seasonal_100_2 = terms[1]
true_sum = true_seasonal_10_3 + true_seasonal_100_2
time_s = np.s_[:50] # After this they basically agree
# Figure 1: filtered period-10 component of models 1 and 2 vs. the truth.
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
idx = np.asarray(series.index)
h1, = ax1.plot(idx[time_s], res_f.freq_seasonal[0].filtered[time_s], label='Double Freq. Seas')
h2, = ax1.plot(idx[time_s], res_tf.seasonal.filtered[time_s], label='Mixed Domain Seas')
h3, = ax1.plot(idx[time_s], true_seasonal_10_3[time_s], label='True Seasonal 10(3)')
plt.legend([h1, h2, h3], ['Double Freq. Seasonal','Mixed Domain Seasonal','Truth'], loc=2)
plt.title('Seasonal 10(3) component')
plt.show()
# Figure 2: filtered period-100 component of models 1 and 2 vs. the truth.
time_s = np.s_[:50] # After this they basically agree
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
h21, = ax2.plot(idx[time_s], res_f.freq_seasonal[1].filtered[time_s], label='Double Freq. Seas')
h22, = ax2.plot(idx[time_s], res_tf.freq_seasonal[0].filtered[time_s], label='Mixed Domain Seas')
h23, = ax2.plot(idx[time_s], true_seasonal_100_2[time_s], label='True Seasonal 100(2)')
plt.legend([h21, h22, h23], ['Double Freq. Seasonal','Mixed Domain Seasonal','Truth'], loc=2)
plt.title('Seasonal 100(2) component')
plt.show()
# Figure 3: combined seasonal components of all four models vs. the true sum.
time_s = np.s_[:100]
fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
h31, = ax3.plot(idx[time_s], res_f.freq_seasonal[1].filtered[time_s] + res_f.freq_seasonal[0].filtered[time_s], label='Double Freq. Seas')
h32, = ax3.plot(idx[time_s], res_tf.freq_seasonal[0].filtered[time_s] + res_tf.seasonal.filtered[time_s], label='Mixed Domain Seas')
h33, = ax3.plot(idx[time_s], true_sum[time_s], label='True Seasonal 100(2)')
h34, = ax3.plot(idx[time_s], res_lf.freq_seasonal[0].filtered[time_s], label='Lazy Freq. Seas')
h35, = ax3.plot(idx[time_s], res_lt.seasonal.filtered[time_s], label='Lazy Time Seas')
plt.legend([h31, h32, h33, h34, h35], ['Double Freq. Seasonal','Mixed Domain Seasonal','Truth', 'Lazy Freq. Seas', 'Lazy Time Seas'], loc=1)
plt.title('Seasonal components combined')
plt.tight_layout(pad=1.0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Synthetic data creation
Step2: Unobserved components (frequency domain modeling)
Step3: Observe that the fitted variances are pretty close to the true variances of 4 and 9. Further, the individual seasonal components look pretty close to the true seasonal components. The smoothed level term is kind of close to the true level of 10. Finally, our diagnostics look solid; the test statistics are small enough to fail to reject our three tests.
Step4: The plotted components look good. However, the estimated variance of the second seasonal term is inflated from reality. Additionally, we reject the Ljung-Box statistic, indicating we may have remaining autocorrelation after accounting for our components.
Step5: Note that one of our diagnostic tests would be rejected at the .05 level.
Step6: The seasonal component itself looks good--it is the primary signal. The estimated variance of the seasonal term is very high ($>10^5$), leading to a lot of uncertainty in our one-step-ahead predictions and slow responsiveness to new data, as evidenced by large errors in one-step ahead predictions and observations. Finally, all three of our diagnostic tests were rejected.
|
2,509
|
<ASSISTANT_TASK:>
Python Code:
# this line is required to see visualizations inline for Jupyter notebook
%matplotlib inline
# importing modules that we need for analysis
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import re
# read the data from file and print out first few rows
jeopardy = pd.read_csv("jeopardy.csv")
print(jeopardy.head(3))
print(jeopardy.columns)
# Column names carry a stray leading space: strip it from each name in place.
jeopardy.rename(columns = lambda x: x[1:] if x[0] == " " else x, inplace=True)
jeopardy.columns
# data1 is an alias (not a copy) of the frame; peek at the most frequent questions.
data1 = jeopardy
data1["Question"].value_counts()[:10]
# regex pattern used to remove hyper-links
pattern = re.compile("^<a href")
# remove media clue questions
# NOTE(review): "== False" (rather than ~) also drops rows where contains()
# yields NaN -- presumably intended; verify against the data.
data1 = data1[data1["Question"].str.contains(pattern) == False]
data1 = data1[data1["Question"] != "[audio clue]"]
data1 = data1[data1["Question"] != "(audio clue)"]
data1 = data1[data1["Question"] != "[video clue]"]
data1 = data1[data1["Question"] != "[filler]"]
data1["Question"].value_counts()[:10]
# New feature: question length in characters.
data1["Question Length"] = data1["Question"].apply(lambda x: len(x))
data1["Question Length"][:12]
data1["Value"].value_counts()[:15]
# get rid of None values
data1 = data1[data1["Value"] != "None"]
# parse integers from strings (drops "$" signs and thousands separators)
pattern = "[0-9]"
data1["Value"] = data1["Value"].apply(lambda x: "".join(re.findall(pattern,x)))
data1["Value"] = data1["Value"].astype(int)
print(data1["Value"].value_counts()[:10])
print("Number of distinct values:" + str(len(data1["Value"].value_counts())))
# Keep only values that are multiples of 100 and at most 2500 (the regular
# round boards), discarding the rarer odd amounts.
data1 = data1[(data1["Value"]%100 == 0) & (data1["Value"]<= 2500)]
print(data1["Value"].value_counts())
print("Number of distinct values: " + str(len(data1["Value"].value_counts())))
# set up the figure and plot length vs value on ax1
fig = plt.figure(figsize=(10,5))
ax1 = fig.add_subplot(1,1,1)
ax1.scatter(data1["Question Length"], data1["Value"])
ax1.set_xlim(0, 800)
ax1.set_ylim(0, 2700)
ax1.set_title("The Relation between Question Length and Value")
# NOTE(review): "Lenght" is a typo in the axis label ("Length").
ax1.set_xlabel("Lenght of the Question")
ax1.set_ylabel("Value of the Question")
plt.show()
# find the average length for each value: one mean per distinct "Value",
# in the order returned by unique()
values = data1["Value"].unique()
average_lengths = [
    data1.loc[data1["Value"] == value, "Question Length"].mean()
    for value in values
]
print(average_lengths)
print(values)
# set up the figure and plot average length vs value on ax1
fig = plt.figure(figsize=(10,5))
ax1 = fig.add_subplot(1,1,1)
ax1.scatter(average_lengths, values)
ax1.set_title("The Relation between Average Question Length and Value")
ax1.set_xlabel("Average Question Length")
ax1.set_ylabel("Value")
ax1.set_xlim(70, 105)
ax1.set_ylim(0, 3000)
plt.show()
# Pearson correlation between average question length and question value.
print("Correlation coefficient: " + str(np.corrcoef(average_lengths, values)[0,1]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Apparently columns have a blank space in the beginning. Let's get rid of them
Step2: Hypothesis - "Value of the question is related to its length."
Step3: There are some media-based questions, and also some questions with hyper-links. These can disturb our analysis so we should get rid of them.
Step4: We can add a column to dataframe for lenght of questions.
Step5: When we look at the "Value" column, we see they are not integers but strings, also there are some "None" values. We should clean those values.
Step6: The "Value" column has 145 different values. For the sake of simplicity, let's keep the ones that are multiples of 100 and between 200 and 2500 (first-round questions have a range of 200-1000, and second-round questions have a range of 500-2500).
Step7: It looks like there isn't a correlation, but this graph isn't structured well enough to draw conclusions. Instead, let's calculate average question length for each value and plot average length vs value.
|
2,510
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import sympy as sy
from sympy.utilities.codegen import codegen
import control.matlab as cm
import re
import matplotlib.pyplot as plt
from scipy import signal
z = sy.symbols('z', real=False)
r1,s0,s1 = sy.symbols('r1,s0,s1', real=True)
hh = sy.symbols('h', real=True, positive=True)
Bp = 2*(0.5*hh - 1 + sy.exp(-0.5*hh))*z + 2*(1-sy.exp(-0.5*hh) - 0.5*hh*sy.exp(-0.5*hh))
Ap = (z-sy.exp(-0.5*hh))*(z-1)
print(Bp)
print(Ap)
Ap = sy.simplify(Ap.subs(hh, 0.04))
Bp = sy.simplify(Bp.subs(hh, 0.04))
print(Bp)
print(Ap)
z = sy.symbols('z', real=False)
r1,s0,s1,aa = sy.symbols('r1,s0,s1,aa', real=True)
Ac = z**2 - z*(0.8*2) + (0.8**2 + 0.16**2)
Ao = z-aa
Acl = Ac*Ao
Rp = z+r1
Sp = s0*z + s1
RHS = sy.simplify(sy.expand(Acl))
diopheq =sy.poly(Ap*Rp+Bp*Sp-Acl, z)
dioph = diopheq.all_coeffs()
print(sy.latex(sy.poly(RHS, z)))
print(dioph)
print(Acl)
print(Ap*Rp)
print(Ac)
print(Ap*Rp)
print(sy.poly(sy.simplify(sy.expand(Ap*Rp + Bp*Sp)), z))
sol = sy.solve(dioph, (r1,s0,s1))
print (-np.exp(-20*0.04) + 0.38)
print (- 0.98 + 0.6656)
print (1.6*np.exp(-20*0.04) - 0.98 + 0.6656)
print (-0.6656*np.exp(-20*0.04))
sol[r1].subs(aa, np.exp(-20*0.04))
print('r_1 = %f' % sol[r1].subs(aa, np.exp(-20*0.04)))
print('s_0 = %f' % sol[s0].subs(aa, np.exp(-20*0.04)))
print('s_1 = %f' % sol[s1].subs(aa, np.exp(-20*0.04)))
t0 = Ac.evalf(subs={z:1})/Bp.evalf(subs={z:1,})
print('t_0 = %f' % t0)
R = Rp.subs(sol)
S = Sp.subs(sol)
T = t0*Ao
Hc = T*Bp/(Ac*Ao)
Hcc = t0*0.8/Ac
sy.pretty_print(sy.expand(Hc))
sy.pretty_print(sy.expand(Hcc))
sy.pretty_print(Hc.evalf(subs={z:1}))
sy.pretty_print(sy.simplify(Ap*R + Bp*S))
0 + 2*1j
sy.Poly((Ac*Ao).subs(aa, -0.2), z).all_coeffs()
def my_bode(num, den, h=1.0):
n = len(den)-1
m = len(num)-1
w = np.linspace(0.01, np.pi, 400);
z = np.exp(1j*w);
dd = den[-1]
nd = num[-1]
for i in range(n):
dd += den[i]*np.power(z, n-i)
for i in range(m):
nd += num[i]*np.power(z, m-i)
return nd/dd,w
def bodeplots(a, aa, Ap, R, Ac, Ao):
numS = sy.list2numpy(sy.Poly((Ap*R).subs(aa, a), z).all_coeffs(),
dtype=np.float64)
numT = sy.list2numpy(sy.Poly((Bp*S).subs(aa, a), z).all_coeffs(),
dtype=np.float64)
den = sy.list2numpy(sy.Poly((Ac*Ao).subs(aa, a), z).all_coeffs(),
dtype=np.float64)
qS, wS = my_bode(numS, den)
qT, wT = my_bode(numT, den)
plt.figure()
plt.loglog(wS, np.abs(qS))
plt.loglog(wT, np.abs(qT))
plt.ylim((0.001, 10))
plt.legend(('Ss', 'Tt'))
#cm.bode([Ss, Tt])
bodeplots(-0.009, aa, Ap, R, Ac, Ao)
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from functools import partial
interact(bodeplots, a=(0.0, 1.0), aa=fixed(aa), Ap=fixed(Ap), R=fixed(R), Ac=fixed(Ac),
Ao=fixed(Ao))
# Reorganize solution expression for matlab code generation
sol_expr = ('RST_DC_lab', [Bp.all_coeffs()[0], Bp.all_coeffs()[1],
Ap.all_coeffs()[1], Ap.all_coeffs()[2],
sol[r1], sol[s0], sol[s1], A2p.subs(z, 1)/Bp.subs(z,1), h,np.exp(h*po1) ])
# Export to matlab code
[(m_name, m_code)] = codegen(sol_expr, 'octave')
m_code = m_code.replace("out1", "b0").replace("out2", "b1").replace("out3", "a1").replace("out4", "a2")
m_code = m_code.replace("out5", "r1").replace("out6", "s0").replace("out7", "s1").replace("out8", "t0")
m_code = m_code.replace("out9", "h").replace("out10", "obsPole")
m_code = m_code.replace("function ", "% function ")
m_code = m_code.replace("end", "")
print m_code
with open("/home/kjartan/Dropbox/undervisning/tec/MR2007/labs/dc_rst_design.m", "w") as text_file:
text_file.write(m_code)
cm.step?
G = Km * cm.tf([1], [tau, 1, 0])
Gd = Km * cm.tf([tau*(hpt-1+np.exp(-hpt)), tau*(1-(1+hpt)*np.exp(-hpt))], [1, -(1+np.exp(-hpt)), np.exp(-hpt)], h)
Gd2 = cm.c2d(G, h)
print Gd
print Gd2
print A2p
print A2p.evalf(subs={z:1})
print Bp
print Bp.evalf(subs={z:1})
0.3/(5*np.sqrt(2))
np.exp(-0.21)*np.sin(0.21)
np.exp(0.03*(-14))
0.746*41.8
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Determine sampling period and desired closed loop poles
Step2: Design a 2-DoF controller
|
2,511
|
<ASSISTANT_TASK:>
Python Code:
# Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Roman Goj <roman.goj@gmail.com>
# Denis Engemann <denis.engemann@gmail.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
import mne
from mne.datasets import somato
from mne.time_frequency import csd_morlet
from mne.beamformer import make_dics, apply_dics_csd
print(__doc__)
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# Use a shorter segment of raw just for speed here
raw = mne.io.read_raw_fif(raw_fname)
raw.crop(0, 120) # one minute for speed (looks similar to using all ~800 sec)
# Read epochs
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-1.5, tmax=2, preload=True)
del raw
# Paths to forward operator and FreeSurfer subject directory
fname_fwd = op.join(data_path, 'derivatives', 'sub-{}'.format(subject),
'sub-{}_task-{}-fwd.fif'.format(subject, task))
subjects_dir = op.join(data_path, 'derivatives', 'freesurfer', 'subjects')
freqs = np.logspace(np.log10(12), np.log10(30), 9)
csd = csd_morlet(epochs, freqs, tmin=-1, tmax=1.5, decim=20)
csd_baseline = csd_morlet(epochs, freqs, tmin=-1, tmax=0, decim=20)
# ERS activity starts at 0.5 seconds after stimulus onset
csd_ers = csd_morlet(epochs, freqs, tmin=0.5, tmax=1.5, decim=20)
info = epochs.info
del epochs
csd = csd.mean()
csd_baseline = csd_baseline.mean()
csd_ers = csd_ers.mean()
fwd = mne.read_forward_solution(fname_fwd)
filters = make_dics(info, fwd, csd, noise_csd=csd_baseline,
pick_ori='max-power', reduce_rank=True, real_filter=True)
del fwd
baseline_source_power, freqs = apply_dics_csd(csd_baseline, filters)
beta_source_power, freqs = apply_dics_csd(csd_ers, filters)
stc = beta_source_power / baseline_source_power
message = 'DICS source power in the 12-30 Hz frequency band'
brain = stc.plot(hemi='both', views='axial', subjects_dir=subjects_dir,
subject=subject, time_label=message)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading the raw data and creating epochs
Step2: We are interested in the beta band. Define a range of frequencies, using a
Step3: Computing the cross-spectral density matrix for the beta frequency band, for
Step4: To compute the source power for a frequency band, rather than each frequency
Step5: Computing DICS spatial filters using the CSD that was computed on the entire
Step6: Applying DICS spatial filters separately to the CSD computed using the
Step7: Visualizing source power during ERS activity relative to the baseline power.
|
2,512
|
<ASSISTANT_TASK:>
Python Code:
!pygmentize moviesentiment.yaml
!kubectl apply -f moviesentiment.yaml
CLUSTER_IPS=!(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
CLUSTER_IP=CLUSTER_IPS[0]
print(CLUSTER_IP)
SERVICE_HOSTNAMES=!(kubectl get inferenceservice moviesentiment -o jsonpath='{.status.url}' | cut -d "/" -f 3)
SERVICE_HOSTNAME=SERVICE_HOSTNAMES[0]
print(SERVICE_HOSTNAME)
import sys
sys.path.append('../')
from alibi_helper import *
from alibi.datasets import fetch_movie_sentiment
movies = fetch_movie_sentiment()
idxNeg = 37
idxPos = 5227
for idx in [idxNeg,idxPos]:
print(movies.data[idx])
show_prediction(predict(movies.data[idx:idx+1],'moviesentiment',movies,SERVICE_HOSTNAME,CLUSTER_IP))
exp = explain(movies.data[idxNeg:idxNeg+1],"moviesentiment",SERVICE_HOSTNAME,CLUSTER_IP)
show_anchors(exp['data']['anchor'])
show_bar([exp['data']['precision']],[''],"Precision")
show_bar([exp['data']['coverage']],[''],"Coverage")
show_feature_coverage(exp['data'])
show_examples(exp['data'],0,movies)
show_examples(exp['data'],0,movies,False)
exp = explain(movies.data[idxPos:idxPos+1],"moviesentiment",SERVICE_HOSTNAME,CLUSTER_IP)
show_anchors(exp['data']['anchor'])
show_bar([exp['data']['precision']],[''],"Precision")
show_bar([exp['data']['coverage']],[''],"Coverage")
show_feature_coverage(exp['data'])
show_examples(exp['data'],0,movies)
show_examples(exp['data'],0,movies,False)
!kubectl delete -f moviesentiment.yaml
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get Explanation for Negative Prediction
Step2: Show precision. How likely predictions using the Anchor features would produce the same result.
Step3: Get Explanation for Positive Example
Step4: Show precision. How likely predictions using the Anchor features would produce the same result.
Step5: Teardown
|
2,513
|
<ASSISTANT_TASK:>
Python Code:
from pathlib import Path
Path.home()
import uuid
uuid.uuid4()
from har2tree import CrawledTree
har_path = Path() / '..' / 'tests' / 'capture_samples' / 'http_redirect' / '0.har'
my_first_crawled_tree = CrawledTree([har_path], str(uuid.uuid4()))
my_first_crawled_tree.root_url
print(my_first_crawled_tree.start_time)
print(my_first_crawled_tree.user_agent)
print(my_first_crawled_tree.redirects)
print(my_first_crawled_tree.root_hartree.start_time)
print(my_first_crawled_tree.root_hartree.start_time == my_first_crawled_tree.start_time)
my_first_crawled_tree.root_hartree.stats
print(my_first_crawled_tree.root_hartree.total_load_time)
print(my_first_crawled_tree.root_hartree.total_size_responses)
har_path = Path() / '..' / #the_path_of_your_capture
complex_crawled_tree = CrawledTree([har_path], str(uuid.uuid4()))
print(complex_crawled_tree.root_hartree.total_load_time)
my_first_crawled_tree.root_hartree.root_after_redirect
har_properties = my_first_crawled_tree.root_hartree.har # For readability
print('uuid: ' + har_properties.capture_uuid)
print('Path: ' + str(har_properties.path) + '\n') # What we defined before
print('Initial redirects: ' + str(har_properties.has_initial_redirects)) # Our example from before
print('Final redirect: ' + har_properties.final_redirect + '\n') # Same as root_after_redirect
print('Unique representation: ' + repr(har_properties)) # path of the capture and the uuid at the same time
print(har_properties.entries[0])
for entry in har_properties.entries:
print(entry['request']['url'])
print(har_properties.number_entries)
print(my_first_crawled_tree.root_hartree.all_url_requests)
print(len(my_first_crawled_tree.root_hartree.all_url_requests))
print(my_first_crawled_tree.root_hartree.rendered_node)
print("\n")
print(my_first_crawled_tree.root_hartree.rendered_node.describe())
my_first_crawled_tree.root_hartree.rendered_node.name
print(my_first_crawled_tree.root_hartree.rendered_node.get_ascii())
#TODO: I didnt manage to fix that one...
from getpass import getpass
!echo {getpass()} | "sudo -S ./interactive_tree.sh"
#Easiest way is to do ./interactive_tree.sh in your shell
my_first_crawled_tree.root_hartree.rendered_node.urls_in_rendered_page
root_node = my_first_crawled_tree.root_hartree.url_tree.search_nodes(name=my_first_crawled_tree.root_url)[0]
print(root_node.name)
my_first_crawled_tree.root_hartree.hostname_tree.to_json()
my_first_crawled_tree.root_hartree.hostname_tree.features
print("request cookies: " + str(my_first_crawled_tree.root_hartree.hostname_tree.request_cookie))
print("response cookies: " + str(my_first_crawled_tree.root_hartree.hostname_tree.response_cookie))
print("3rd party cookies: " + str(my_first_crawled_tree.root_hartree.hostname_tree.third_party_cookies_received))
print("mixed content: " + str(my_first_crawled_tree.root_hartree.hostname_tree.mixed_content))
har_path = Path() / '..' / 'tests' / 'capture_samples' / 'cookie' / '0.har'
cookie_crawled_tree = CrawledTree([har_path], str(uuid.uuid4()))
print("request cookies: " + str(cookie_crawled_tree.root_hartree.hostname_tree.request_cookie))
print("response cookies: " + str(cookie_crawled_tree.root_hartree.hostname_tree.response_cookie))
print("3rd party cookies: " + str(cookie_crawled_tree.root_hartree.hostname_tree.third_party_cookies_received))
print("mixed content: " + str(cookie_crawled_tree.root_hartree.hostname_tree.mixed_content))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Great. Now let's try to create our first tree. As mentioned before, you will also need to pass a uuid as a parameter, but don't worry, python has everything you need
Step2: Few notes though
Step3: Part 1
Step4: Why not also check at what time the capture was made, as well as the user agent that made it
Step5: Finally, what really interests us
Step6: And that's it for the first part. With very few lines of codes, we are able to extract very useful information in neglectable execution time. This makes it so much easier than having to go through the HAR file and find what you're looking for.
Step7: The stats property calls multiple useful other properties and displays them nicely in a JSON format. You can find what it calls here and trace it back to the other properties in case you want to know something in specific that is not covered here.
Step8: You can get a pretty good idea of the time taken to build a tree by calling the total_load_time property. It's not 100% precise as some loads are made in parallel but it gives a good approximation. Along with it, you can call total_size_responses that give you the size in bytes of the response bodies
Step9: You can see that with this simple capture, it doesn't take a lot of time. What about a more complex one?
Step10: A very interesting property to look at is root_after_redirect
Step11: If you really want to dig deeper and investigate the whole construction of the tree, I recommend you take a look here. This will give you more insight and the whole construction with every single step cannot be covered here.
Step12: Only execute that one if you want to see all the informations of one given URL.
Step13: As you can see, the URL is located in request > url. Let's see what we can do with that.
Step14: <span style="color
Step15: You can however see that we only have 6 as the duplicates were removed compared to 7 before.
Step16: You might find weird that <span style="color
Step17: Or you could invoke the get_ascii method because its default show_internal parameter is set to True but you will have to zoom out to get someting readable
Step18: Finally, you could run this little script. It invokes the method .show() of a node which opens a window with an interactive interface and really helps visualizing what the node actually contains. However, you may face a lot of problems while running it, so here is a screenshot just in case.
Step19: <br/>
Step20: Time for a bit more complicated example
Step21: To see all the informations that a node contains, you can simply dump all the features using the to_json method
Step22: But this is difficult to read. Instead, you could check the featuresproperty that are updated in the add_url method for HostNode or load_har_entry for URLNode that give a way clearer view of what's inside the node
Step23: A few more HostNode interesting features
Step24: Let's see what happens with the cookie capture of capture_samples where we passed a cookie in the request
|
2,514
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from statsmodels.compat import lmap
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
norms = sm.robust.norms
def plot_weights(support, weights_func, xlabels, xticks):
    """Plot weights_func evaluated over support and return the axes.

    Tick positions/labels come from *xticks*/*xlabels*; the y-range is pinned
    to [-0.1, 1.1] since M-estimator weight functions map into [0, 1].
    """
    figure = plt.figure(figsize=(12, 8))
    axes = figure.add_subplot(111)
    curve = weights_func(support)
    axes.plot(support, curve)
    axes.set_xticks(xticks)
    axes.set_xticklabels(xlabels, fontsize=16)
    axes.set_ylim(-0.1, 1.1)
    return axes
help(norms.AndrewWave.weights)
a = 1.339
support = np.linspace(-np.pi*a, np.pi*a, 100)
andrew = norms.AndrewWave(a=a)
plot_weights(support, andrew.weights, ['$-\pi*a$', '0', '$\pi*a$'], [-np.pi*a, 0, np.pi*a]);
help(norms.Hampel.weights)
c = 8
support = np.linspace(-3*c, 3*c, 1000)
hampel = norms.Hampel(a=2., b=4., c=c)
plot_weights(support, hampel.weights, ['3*c', '0', '3*c'], [-3*c, 0, 3*c]);
help(norms.HuberT.weights)
t = 1.345
support = np.linspace(-3*t, 3*t, 1000)
huber = norms.HuberT(t=t)
plot_weights(support, huber.weights, ['-3*t', '0', '3*t'], [-3*t, 0, 3*t]);
help(norms.LeastSquares.weights)
support = np.linspace(-3, 3, 1000)
lst_sq = norms.LeastSquares()
plot_weights(support, lst_sq.weights, ['-3', '0', '3'], [-3, 0, 3]);
help(norms.RamsayE.weights)
a = .3
support = np.linspace(-3*a, 3*a, 1000)
ramsay = norms.RamsayE(a=a)
plot_weights(support, ramsay.weights, ['-3*a', '0', '3*a'], [-3*a, 0, 3*a]);
help(norms.TrimmedMean.weights)
c = 2
support = np.linspace(-3*c, 3*c, 1000)
trimmed = norms.TrimmedMean(c=c)
plot_weights(support, trimmed.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
help(norms.TukeyBiweight.weights)
c = 4.685
support = np.linspace(-3*c, 3*c, 1000)
tukey = norms.TukeyBiweight(c=c)
plot_weights(support, tukey.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
x = np.array([1, 2, 3, 4, 500])
x.mean()
np.median(x)
x.std()
stats.norm.ppf(.75)
print(x)
sm.robust.scale.mad(x)
np.array([1,2,3,4,5.]).std()
sm.robust.scale.iqr(x)
sm.robust.scale.qn_scale(x)
np.random.seed(12345)
fat_tails = stats.t(6).rvs(40)
kde = sm.nonparametric.KDEUnivariate(fat_tails)
kde.fit()
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(kde.support, kde.density);
print(fat_tails.mean(), fat_tails.std())
print(stats.norm.fit(fat_tails))
print(stats.t.fit(fat_tails, f0=6))
huber = sm.robust.scale.Huber()
loc, scale = huber(fat_tails)
print(loc, scale)
sm.robust.mad(fat_tails)
sm.robust.mad(fat_tails, c=stats.t(6).ppf(.75))
sm.robust.scale.mad(fat_tails)
from statsmodels.graphics.api import abline_plot
from statsmodels.formula.api import ols, rlm
prestige = sm.datasets.get_rdataset("Duncan", "carData", cache=True).data
print(prestige.head(10))
fig = plt.figure(figsize=(12,12))
ax1 = fig.add_subplot(211, xlabel='Income', ylabel='Prestige')
ax1.scatter(prestige.income, prestige.prestige)
xy_outlier = prestige.loc['minister', ['income','prestige']]
ax1.annotate('Minister', xy_outlier, xy_outlier+1, fontsize=16)
ax2 = fig.add_subplot(212, xlabel='Education',
ylabel='Prestige')
ax2.scatter(prestige.education, prestige.prestige);
ols_model = ols('prestige ~ income + education', prestige).fit()
print(ols_model.summary())
infl = ols_model.get_influence()
student = infl.summary_frame()['student_resid']
print(student)
print(student.loc[np.abs(student) > 2])
print(infl.summary_frame().loc['minister'])
sidak = ols_model.outlier_test('sidak')
sidak.sort_values('unadj_p', inplace=True)
print(sidak)
fdr = ols_model.outlier_test('fdr_bh')
fdr.sort_values('unadj_p', inplace=True)
print(fdr)
rlm_model = rlm('prestige ~ income + education', prestige).fit()
print(rlm_model.summary())
print(rlm_model.weights)
dta = sm.datasets.get_rdataset("starsCYG", "robustbase", cache=True).data
from matplotlib.patches import Ellipse
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111, xlabel='log(Temp)', ylabel='log(Light)', title='Hertzsprung-Russell Diagram of Star Cluster CYG OB1')
ax.scatter(*dta.values.T)
# highlight outliers
e = Ellipse((3.5, 6), .2, 1, alpha=.25, color='r')
ax.add_patch(e);
ax.annotate('Red giants', xy=(3.6, 6), xytext=(3.8, 6),
arrowprops=dict(facecolor='black', shrink=0.05, width=2),
horizontalalignment='left', verticalalignment='bottom',
clip_on=True, # clip to the axes bounding box
fontsize=16,
)
# annotate these with their index
for i,row in dta.loc[dta['log.Te'] < 3.8].iterrows():
ax.annotate(i, row, row + .01, fontsize=14)
xlim, ylim = ax.get_xlim(), ax.get_ylim()
from IPython.display import Image
Image(filename='star_diagram.png')
y = dta['log.light']
X = sm.add_constant(dta['log.Te'], prepend=True)
ols_model = sm.OLS(y, X).fit()
abline_plot(model_results=ols_model, ax=ax)
rlm_mod = sm.RLM(y, X, sm.robust.norms.TrimmedMean(.5)).fit()
abline_plot(model_results=rlm_mod, ax=ax, color='red')
infl = ols_model.get_influence()
h_bar = 2*(ols_model.df_model + 1 )/ols_model.nobs
hat_diag = infl.summary_frame()['hat_diag']
hat_diag.loc[hat_diag > h_bar]
sidak2 = ols_model.outlier_test('sidak')
sidak2.sort_values('unadj_p', inplace=True)
print(sidak2)
fdr2 = ols_model.outlier_test('fdr_bh')
fdr2.sort_values('unadj_p', inplace=True)
print(fdr2)
l = ax.lines[-1]
l.remove()
del l
weights = np.ones(len(X))
weights[X[X['log.Te'] < 3.8].index.values - 1] = 0
wls_model = sm.WLS(y, X, weights=weights).fit()
abline_plot(model_results=wls_model, ax=ax, color='green')
yy = y.values[:,None]
xx = X['log.Te'].values[:,None]
params = [-4.969387980288108, 2.2531613477892365] # Computed using R
print(params[0], params[1])
abline_plot(intercept=params[0], slope=params[1], ax=ax, color='red')
np.random.seed(12345)
nobs = 200
beta_true = np.array([3, 1, 2.5, 3, -4])
X = np.random.uniform(-20,20, size=(nobs, len(beta_true)-1))
# stack a constant in front
X = sm.add_constant(X, prepend=True) # np.c_[np.ones(nobs), X]
mc_iter = 500
contaminate = .25 # percentage of response variables to contaminate
all_betas = []
for i in range(mc_iter):
y = np.dot(X, beta_true) + np.random.normal(size=200)
random_idx = np.random.randint(0, nobs, size=int(contaminate * nobs))
y[random_idx] = np.random.uniform(-750, 750)
beta_hat = sm.RLM(y, X).fit().params
all_betas.append(beta_hat)
all_betas = np.asarray(all_betas)
se_loss = lambda x : np.linalg.norm(x, ord=2)**2
se_beta = lmap(se_loss, all_betas - beta_true)
np.array(se_beta).mean()
all_betas.mean(0)
beta_true
se_loss(all_betas.mean(0) - beta_true)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: An M-estimator minimizes the function
Step2: Andrew's Wave
Step3: Hampel's 17A
Step4: Huber's t
Step5: Least Squares
Step6: Ramsay's Ea
Step7: Trimmed Mean
Step8: Tukey's Biweight
Step9: Scale Estimators
Step10: The mean is not a robust estimator of location
Step11: The median, on the other hand, is a robust estimator with a breakdown point of 50%
Step12: Analogously for the scale
Step13: Median Absolute Deviation
Step14: Another robust estimator of scale is the Interquartile Range (IQR)
Step15: The IQR is less robust than the MAD in the sense that it has a lower breakdown point
Step16: The default for Robust Linear Models is MAD
Step17: Duncan's Occupational Prestige data - M-estimation for outliers
Step18: Hertzprung Russell data for Star Cluster CYG 0B1 - Leverage Points
Step19: Why? Because M-estimators are not robust to leverage points.
Step20: Let's delete that line
Step21: MM estimators are good for this type of problem, unfortunately, we do not yet have these yet.
Step22: Note
Step23: Exercise
Step24: Squared error loss
|
2,515
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from __future__ import division
capital_base = 100000
r_p = 0.05 # Aggregate performance of assets in the portfolio
r_no_lvg = capital_base * r_p
print 'Portfolio returns without leverage: {0}'.format(r_no_lvg)
debt = 100000
r_lvg = (capital_base + debt) * r_p
r_lvg_pct = r_lvg / capital_base
# Returns are calculated over the initial capital base
# Think of the debt as an asset purchased and added to the portfolio
lvg_ratio = (debt + capital_base) / capital_base
print 'Portfolio returns with leverage: {0}'.format(r_lvg)
print 'Percentage returns with {1}x leverage: {0}'.format(r_lvg_pct, lvg_ratio)
capital_base = 100000
debt = 50000
i = 0.02
r_p = 0.05
int_pmt = i * debt
r_lvg = (capital_base + debt) * r_p
r_total = r_lvg - int_pmt
r_pct_lvg = r_total / capital_base
lvg_ratio = (capital_base + debt) / capital_base
print 'Portfolio returns with leverage and interest: {0}'.format(r_total)
print 'Percentage returns with {1}x leverage and {2}% interest: {0}'.format(r_pct_lvg, lvg_ratio, i * 100)
bt = get_backtest('57e297562a42c9103c11a920')
recorded_vars = bt.recorded_vars
leverage = recorded_vars['leverage']
daily_performance = bt.daily_performance
daily_returns = daily_performance['returns']
plt.plot(leverage);
plt.title("Leverage Ratio of a Trading Algorithm Over Time")
plt.xlabel('Date');
plt.ylabel('Leverage Ratio');
plt.legend();
plt.plot(daily_returns);
plt.xlabel('Date');
plt.ylabel('Daily Returns');
plt.legend();
print "Mean return: ", daily_returns.mean()
print "Volatility: ", daily_returns.std()
# Note these are all expected returns. You need to validate that
# your strategy will continue to produce these returns and volatility using other methods.
# Just measuring historically is a very poor predictor of future performance.
# We'll for now assume you've already validated that you can expect to keep seeing numbers similar to these.
strat_A_ann_return = 0.22
strat_A_ann_vol = 0.15
strat_B_ann_return = 0.05
strat_B_ann_vol = 0.02
# We'll assume a risk free rate of 0.02
risk_free_rate = 0.02
print 'Strategy A Sharpe: %s' % ((strat_A_ann_return - risk_free_rate) / strat_A_ann_vol)
print 'Strategy B Sharpe: %s' % ((strat_B_ann_return - risk_free_rate) / strat_B_ann_vol)
# Add in leverage to B
leverage = 3
# Expressed in returns
print 'Strategy B Sharpe: %s' % (
(strat_B_ann_return * leverage - risk_free_rate * leverage) / (strat_B_ann_vol * leverage)
)
print 'Strategy B Levered Annual Returns: %s' % (
(strat_B_ann_return * leverage - risk_free_rate * leverage)
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This is what portfolio returns look like without leverage. Let's add some debt, leveraging the portfolio, and see how the returns change.
Step2: This is the ideal situation, that someone would lend you money without asking for anything in return. It results in double the effective additive returns of an unlevered strategy, which is just delightful. However, we know that in the real world there is no way that this would actually happen. Let's consider what happens when we add in the effects of an interest payment in our one-period model.
Step3: That makes a lot more sense. It would be unreasonable for us to assume that we can add someone else's money to our portfolio without some sort of repayment schedule. Our returns are not as high as they were in the levered portfolio with no interest, but we are still gaining a greater amount of wealth by using leverage, despite the interest rates. As long as we have a reliable strategy that can make sufficient returns to offset the cost of debt we will be able to benefit from levering a portfolio.
Step4: Here is the leverage ratio of this algorithm plotted over time. Notice how it jumps around quite frequently. The ratio is below $1$ when it is not using all of its base capital and it spikes above $1$ whenever it makes a trade on margin. The algorithm associated with this leverage ratio is a long-short equity algorithm based on a combination of fundamental factors. For an overview of how a long-short equity strategy works, please see the lectures page.
Step5: Using leverage can be dangerous when you are dealing with a more volatile strategy. Because you are trading with borrowed money, we are on the hook to return it. We have to make sure that the broker gets his money back before we get our profit. If we end up in a position where we get a margin call, we have to pony up more funds if we want to hold our positions. Monitoring your strategy's volatility and ensuring you are only taking on palatable amounts of debt are key aspects of determining the quality of your trading strategy.
|
2,516
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
# check tf version
print(tf.__version__)
a = tf.constant(2)
b = tf.constant(5)
operation = tf.add(a, b, name='cons_add')
with tf.Session() as ses:
print ses.run(operation)
sub_operation = tf.subtract(a, b, name='cons_subtraction')
x = tf.constant([[-1.37 + 2.57j], [-3.37 + 5.33j]])
abs_function = tf.abs(x)
with tf.Session() as ses:
print ses.run(abs_function)
pos_tensor = tf.constant([[5],[7]])
negative_function = tf.negative(pos_tensor)
with tf.Session() as ses:
print ses.run(negative_function)
sign_tensor = tf.constant([[5]])
sign_function = tf.sign(sign_tensor)
with tf.Session() as ses:
print ses.run(sign_function)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Config Contants
Step2: en la variable "b" vamos a asignar una constante con el valor inicial de "5"
Step3: En la siguiente variable "operation" definiremos una suma mediante la aplicación de "add". Como un parámetro vamos a utilizar las constantes definidas anteriormente. "a" y "b"
Step4: link documentacion oficial - add
Step5: Constants - Subtraction
Step6: link documentacion oficial - subtract
Step7: official documentation
Step8: tf.negative
Step9: official documentation
Step10: tf.sign
Step11: official documentation
|
2,517
|
<ASSISTANT_TASK:>
Python Code:
# Выделяем outdoor'ы и indoor'ы.
sample_out = sample[result[:, 0] == 1]
sample_in = sample[result[:, 1] == 1]
result_out = result[result[:, 0] == 1]
result_in = result[result[:, 1] == 1]
# Считаем размер indoor- и outdoor-частей в train'е.
train_size_in = int(sample_in.shape[0] * 0.75)
train_size_out = int(sample_out.shape[0] * 0.75)
# Разделяем outdoor'ы и indoor'ы на обучающую и тестовую часть.
x_train_out, x_test_out = np.split(sample_out, [train_size_out])
y_train_out, y_test_out = np.split(result_out, [train_size_out])
x_train_in, x_test_in = np.split(sample_in, [train_size_in])
y_train_in, y_test_in = np.split(result_in, [train_size_in])
# Делаем общий train и test, смешивая indoor'ы и outdoor'ы.
x_train = np.vstack([x_train_in, x_train_out])
y_train = np.vstack([y_train_in, y_train_out])
x_test = np.vstack([x_test_in, x_test_out])
y_test = np.vstack([y_test_in, y_test_out])
def softmax(W, x):
    """Row-wise softmax of the class scores x @ W.T.

    Args:
        W: weight matrix, one row per class (bias folded into the last column).
        x: sample matrix, one row per image (trailing 1 appended for the bias).

    Returns:
        Matrix of class probabilities, one row per sample.
    """
    # logsumexp computes log(sum(exp(...))) in a numerically stable way,
    # largely avoiding overflow in the exponentials.
    # NOTE(review): `scm` is imported outside this view — presumably
    # scipy.misc (deprecated) or scipy.special; confirm the alias.
    p = np.dot(x, W.T)
    return np.exp(p - scm.logsumexp(p, axis=1).reshape(-1, 1))
def loss(y, softmax, W, l):
    """Mean cross-entropy of the predicted probabilities plus an L2 penalty.

    Args:
        y: one-hot label matrix, one row per sample.
        softmax: predicted class-probability matrix, same shape as y.
        W: weight matrix entering the regularizer.
        l: regularization coefficient.

    Returns:
        Scalar loss value.
    """
    n_samples = y.shape[0]
    # For one-hot y this picks -log(p_true) for each sample, then averages.
    cross_entropy = -(y * np.log(softmax)).sum(axis=1).mean()
    # tr(W W^T) is simply the sum of squared weights.
    penalty = l * (W * W).sum() / (2 * n_samples)
    return cross_entropy + penalty
# Mean gradient over all images.
# The gradient here is not the usual vector but a 2x3073 matrix (shape of W).
def gradients(W, x, y, l):
    """Gradient of the regularized cross-entropy loss w.r.t. W, averaged over x.

    Args:
        W: weight matrix (bias column included).
        x: sample matrix with the trailing bias column of ones.
        y: one-hot labels matching *x*.
        l: L2 regularization coefficient.

    Returns:
        Matrix of the same shape as W.
    """
    p = softmax(W, x)
    grads = (p - y).T @ x + l * W
    return grads / x.shape[0]  # matrix computations all the way — no Python loops!
# Step-size selection by the Armijo rule (backtracking line search).
def armijo(W, x, y, l, alpha=0.5, beta=0.5):
    """Backtracking (Armijo) line search for the gradient-descent step size.

    Starts at step s = 1 and shrinks it by *beta* until the sufficient-decrease
    condition holds:
        loss(W + s*dW) <= loss(W) + alpha * s * <grad, dW>
    where dW = -grad is the descent direction.

    Args:
        W: current weight matrix (bias column included).
        x: training sample matrix.
        y: one-hot labels matching *x*.
        l: L2 regularization coefficient passed through to loss/gradients.
        alpha: sufficient-decrease parameter in (0, 1).
        beta: multiplicative shrink factor for the step, in (0, 1).

    Returns:
        The accepted step size s.
    """
    s = 1
    grad = gradients(W, x, y, l)
    dW = -grad  # descent direction
    # Bug fix: the losses must be evaluated on the labels *y* passed in,
    # not on the global y_train (the original silently ignored its argument).
    # loss_0 does not depend on s, so it is computed once, outside the loop.
    loss_0 = loss(y, softmax(W, x), W, l)
    loss_1 = loss(y, softmax(W + s * dW, x), W, l)
    while loss_1 > loss_0 + alpha * s * (grad * dW).sum():
        s = beta * s
        loss_1 = loss(y, softmax(W + s * dW, x), W, l)
    return s
def classify(x_train, x_test, y_train, y_test, iters, l):
    """Train a 2-class softmax classifier by gradient descent with Armijo steps.

    Args:
        x_train, x_test: raw 3072-feature samples (a bias column is appended here).
        y_train, y_test: one-hot labels (outdoor/indoor).
        iters: number of gradient-descent iterations.
        l: L2 regularization coefficient.

    Returns:
        (W, losses_train, losses_test): the trained 2x3073 weight matrix and the
        per-iteration loss histories on the train and test sets.
    """
    # As noted above, W is 2x3072 and b has size 2, but we append b to W.
    W = np.zeros((2, 3072))
    b = np.zeros(2)
    # To append it, write b as a column vector and use hstack.
    b = b.reshape(b.size, 1)
    W = np.hstack([W, b])
    # Accordingly, x_train and x_test must get a trailing 1 appended.
    fictious = np.ones((x_train.shape[0], 1))
    x_train = np.hstack([x_train, fictious])
    fictious = np.ones((x_test.shape[0], 1))
    x_test = np.hstack([x_test, fictious])
    # Record the loss at every descent step.
    losses_train = [loss(y_train, softmax(W, x_train), W, l)]
    losses_test = [loss(y_test, softmax(W, x_test), W, l)]
    # The descent itself.
    for i in tqdm.tqdm(np.arange(iters)):
        # Exactly so — Armijo receives alpha = l and l = 0!
        # (Author's note: this was an unnoticed mistake, deliberately kept
        # because the reported results were obtained this way.)
        eta = armijo(W, x_train, y_train, 0, l)
        W = W - eta * gradients(W, x_train, y_train, l)
        losses_train.append(loss(y_train, softmax(W, x_train), W, l))
        losses_test.append(loss(y_test, softmax(W, x_test), W, l))
    # Output: the fitted W and the loss histories.
    return W, losses_train, losses_test
l = 0.04 # Сработает лучше, чем вообще без регуляризатора (l = 0).
# Нам хватит и 100 итераций, переобучение начинается достаточно быстро.
W, losses_train, losses_test = classify(x_train, x_test, y_train, y_test, 100, l)
plt.plot(losses_train, color='green', label='train')
plt.plot(losses_test, color='red', label='test')
plt.xlabel('Gradient descent iteration')
plt.ylabel('Loss')
plt.legend()
plt.show()
iters = np.argmin(losses_test) # На этой итиреации ошибка на тесте минимальна.
# Делаем столько итераций.
W, losses_train, losses_test = classify(x_train, x_test, y_train, y_test, iters, l)
# Добавляем 1 к выборке.
nx_test = np.hstack([x_test, np.ones(x_test.shape[0]).reshape(x_test.shape[0], 1)])
probabilities = softmax(W, nx_test) # Считаем вероятности.
recognized = np.argmax(probabilities, axis=1) # Что распознано.
answers = np.argmax(y_test, axis=1) # Правильные ответы.
np.sqrt(np.mean((recognized - answers) ** 2)) # Собственно, ошибка.
# Добавляем 1 к выборке.
ntest = np.hstack([test, np.ones(test.shape[0]).reshape(test.shape[0], 1)])
probabilities = softmax(W, ntest) # Считаем вероятности.
ress = np.argmax(probabilities, axis=1).reshape(-1, 1) # Что распознано.
# Осталось загнать все в табличку, чтобы ее записать в csv.
ids = np.arange(ress.size).reshape(-1, 1)
submit = np.hstack([ids, ress])
# Заполняем csv-шник.
import csv
with open('submission.csv', 'w', newline='') as csvfile:
submission = csv.writer(csvfile, delimiter=',')
submission.writerow(['id', 'res'])
submission.writerows(submit)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Для каждой картинки мы хотим найти вектор $(p_0, p_1)$, вероятностей такой, что $p_i$ - вероятность того, что картинка принадлежит классу $i$ ($0$ — outdoor, $1$ — indoor).
Step2: Посчитаем среднюю квадратичную ошибку на тесте, чтобы прикинуть, что будет на Kaggle.
Step3: Теперь применяем найденную матрицу к исследумемым данным.
|
2,518
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format='retina'
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
names = ["ID","R","I","J","H","KS","TiO_7140","TiO_8465","NaI_8189","Spectral Type","EW_Ha","Gravity"]
tbl1 = pd.read_csv("http://iopscience.iop.org/1538-3881/131/6/3016/fulltext/205148.tb1.txt",
sep='\t', names=names, skip_blank_lines=True, na_values='\ldots')
tbl1.head()
names = ["ID","MJ","AV","log_Teff","log_L_bol"]
tbl2 = pd.read_csv("http://iopscience.iop.org/1538-3881/131/6/3016/fulltext/205148.tb2.txt",
sep='\t', names=names, skip_blank_lines=True, na_values='\ldots')
tbl2.head()
tbl1.to_csv("../data/Slesnick2006/tbl1.csv", sep="\t", index=False)
tbl2.to_csv("../data/Slesnick2006/tbl2.csv", sep="\t", index=False)
%matplotlib inline
import seaborn as sns
import matplotlib.pyplot as plt
import gully_custom
SpT, _, _, _ = gully_custom.specTypePlus(tbl1["Spectral Type"])
EW_NaI_8189 = (1.0 - tbl1.NaI_8189.values)*30.0
c = 'Dwarf'
i = tbl1.Gravity.values == c
plt.plot(SpT[i], EW_NaI_8189[i], '.', label = c)
c = 'USco'
i = tbl1.Gravity.values == c
plt.plot(SpT[i], EW_NaI_8189[i], '.', label = c)
c = 'Int'
i = tbl1.Gravity.values == c
plt.plot(SpT[i], EW_NaI_8189[i], '.', label = c)
plt.legend(loc='best')
plt.xlim(0, 15)
plt.ylim(-5, 8.5)
EW_NaI_8189[tbl1.Gravity.values == 'USco']
from astroquery.simbad import Simbad
import astropy.coordinates as coord
import astropy.units as u
customSimbad.list_votable_fields()
bands = ['U','B','V','R','I','J','H','K','u','g','r','i','z']
fluxes = ['fluxdata({})'.format(band) for band in bands]
flux_cols = sum([['FLUX_{}'.format(band), 'FLUX_ERROR_{}'.format(band)] for band in bands], [])
customSimbad = Simbad()
customSimbad.add_votable_fields('otype', 'id', 'sptype', 'flux(U)','flux_error(U)''flux(B)','flux_error(B)''flux(V)','flux_error(V)''flux(R)','flux_error(R)''flux(I)','flux_error(I)''flux(J)','flux_error(J)''flux(H)','flux_error(H)''flux(K)','flux_error(K)''flux(u)','flux_error(u)''flux(g)','flux_error(g)''flux(r)','flux_error(r)''flux(i)','flux_error(i)''flux(z)','flux_error(z)')
input_to_simbad = "[SCH2006] J"+tbl2.ID.str.strip("SCH ")
result = customSimbad.query_objects(input_to_simbad, verbose=True)
result.columns
result[flux_cols][0:5]
print "There were {} sources queried, and {} sources found.".format(len(input_to_simbad), len(result))
if len(input_to_simbad) == len(result):
print "Hooray! Everything matched"
else:
print "Which ones were not found?"
tbl2["Simbad_name"] = input_to_simbad
result["Simbad_name"] = input_to_simbad
simbad_df = result.to_pandas()
del simbad_df["ID"]
tbl2_simbad = pd.merge(tbl2, simbad_df, on="Simbad_name")
tbl2_simbad.columns
tbl2_simbad[[u'MAIN_ID', u'ID', u'RA', u'DEC', u'MJ', u'AV', u'log_Teff', u'log_L_bol',u'OTYPE',
u'SP_TYPE', u'SP_BIBCODE']].head()
tbl2_simbad.to_csv("../data/Slesnick2006/tbl2_plusSimbad.csv", index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Table 1- Measured Quantities for PMS Candidates with Observed Spectra
Step2: Table 2 - Derived Quantities for New USco Members
Step3: Save the data tables locally.
Step4: Make a plot
Step5: Turn the ratio into an equivalent width
Step6: Match the sources to SIMBAD
Step7: SCH is a non-standard input for Simbad. Need to put in the 2MASS coordinates.
Step8: Merge the catalogs
|
2,519
|
<ASSISTANT_TASK:>
Python Code:
import collections
import os
import StringIO
import sys
import tarfile
import tempfile
import urllib
from IPython import display
from ipywidgets import interact
from ipywidgets import interactive
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
import tensorflow as tf
if tf.__version__ < '1.5.0':
raise ImportError('Please upgrade your tensorflow installation to v1.5.0 or newer!')
# Needed to show segmentation colormap labels
sys.path.append('utils')
import get_dataset_colormap
_MODEL_URLS = {
'xception_coco_voctrainaug': 'http://download.tensorflow.org/models/deeplabv3_pascal_train_aug_2018_01_04.tar.gz',
'xception_coco_voctrainval': 'http://download.tensorflow.org/models/deeplabv3_pascal_trainval_2018_01_04.tar.gz',
}
Config = collections.namedtuple('Config', 'model_url, model_dir')
def get_config(model_name, model_dir):
return Config(_MODEL_URLS[model_name], model_dir)
config_widget = interactive(get_config, model_name=_MODEL_URLS.keys(), model_dir='')
display.display(config_widget)
# Check configuration and download the model
_TARBALL_NAME = 'deeplab_model.tar.gz'
config = config_widget.result
model_dir = config.model_dir or tempfile.mkdtemp()
tf.gfile.MakeDirs(model_dir)
download_path = os.path.join(model_dir, _TARBALL_NAME)
print 'downloading model to %s, this might take a while...' % download_path
urllib.urlretrieve(config.model_url, download_path)
print 'download completed!'
_FROZEN_GRAPH_NAME = 'frozen_inference_graph'
class DeepLabModel(object):
    """Class to load deeplab model and run inference."""

    # Names of the input/output tensors inside the frozen inference graph.
    INPUT_TENSOR_NAME = 'ImageTensor:0'
    OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'
    # The exported model expects the longer image side resized to 513 px.
    INPUT_SIZE = 513

    def __init__(self, tarball_path):
        """Creates and loads pretrained deeplab model."""
        self.graph = tf.Graph()

        graph_def = None
        # Extract frozen graph from tar archive.
        tar_file = tarfile.open(tarball_path)
        for tar_info in tar_file.getmembers():
            if _FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):
                file_handle = tar_file.extractfile(tar_info)
                graph_def = tf.GraphDef.FromString(file_handle.read())
                break
        tar_file.close()

        if graph_def is None:
            raise RuntimeError('Cannot find inference graph in tar archive.')

        with self.graph.as_default():
            tf.import_graph_def(graph_def, name='')

        self.sess = tf.Session(graph=self.graph)

    def run(self, image):
        """Runs inference on a single image.

        Args:
            image: A PIL.Image object, raw input image.

        Returns:
            resized_image: RGB image resized from original input image.
            seg_map: Segmentation map of `resized_image`.
        """
        width, height = image.size
        # Scale so the longer side equals INPUT_SIZE, preserving aspect ratio.
        resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)
        target_size = (int(resize_ratio * width), int(resize_ratio * height))
        resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)
        batch_seg_map = self.sess.run(
            self.OUTPUT_TENSOR_NAME,
            feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})
        seg_map = batch_seg_map[0]  # single-image batch -> first (only) map
        return resized_image, seg_map
model = DeepLabModel(download_path)
LABEL_NAMES = np.asarray([
'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
'train', 'tv'
])
FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
FULL_COLOR_MAP = get_dataset_colormap.label_to_color_image(FULL_LABEL_MAP)
def vis_segmentation(image, seg_map):
    """Show the input image, colorized segmentation map, overlay and a legend."""
    plt.figure(figsize=(15, 5))
    grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])

    # Panel 1: the (resized) input image.
    plt.subplot(grid_spec[0])
    plt.imshow(image)
    plt.axis('off')
    plt.title('input image')

    # Panel 2: label map converted to the PASCAL color palette.
    plt.subplot(grid_spec[1])
    seg_image = get_dataset_colormap.label_to_color_image(
        seg_map, get_dataset_colormap.get_pascal_name()).astype(np.uint8)
    plt.imshow(seg_image)
    plt.axis('off')
    plt.title('segmentation map')

    # Panel 3: segmentation drawn semi-transparently over the image.
    plt.subplot(grid_spec[2])
    plt.imshow(image)
    plt.imshow(seg_image, alpha=0.7)
    plt.axis('off')
    plt.title('segmentation overlay')

    # Panel 4: color legend restricted to the labels actually present.
    unique_labels = np.unique(seg_map)
    ax = plt.subplot(grid_spec[3])
    plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')
    ax.yaxis.tick_right()
    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
    plt.xticks([], [])
    ax.tick_params(width=0)
    plt.show()
# Note that we are using single scale inference in the demo for fast
# computation, so the results may slightly differ from the visualizations
# in README, which uses multi-scale and left-right flipped inputs.
IMAGE_DIR = 'g3doc/img'
def run_demo_image(image_name):
    """Load a bundled sample image by name, run DeepLab on it and visualize.

    Reads from the module-level IMAGE_DIR; uses the module-level `model`
    (a DeepLabModel) for inference. Python 2 print syntax, kept as-is.
    """
    try:
        image_path = os.path.join(IMAGE_DIR, image_name)
        orignal_im = Image.open(image_path)
    except IOError:
        print 'Failed to read image from %s.' % image_path
        return
    print 'running deeplab on image %s...' % image_name
    resized_im, seg_map = model.run(orignal_im)
    vis_segmentation(resized_im, seg_map)
_ = interact(run_demo_image, image_name=['image1.jpg', 'image2.jpg', 'image3.jpg'])
def get_an_internet_image(url):
    """Fetch an image from a URL (or local path) and run DeepLab on it.

    Python 2 code: uses urllib.urlopen and StringIO. An empty url is a no-op
    so the interact() widget below starts without an error.
    """
    if not url:
        return
    try:
        # Prefix with 'file://' for local file.
        if os.path.exists(url):
            url = 'file://' + url
        f = urllib.urlopen(url)
        jpeg_str = f.read()
    except IOError:
        print 'invalid url: ' + url
        return
    orignal_im = Image.open(StringIO.StringIO(jpeg_str))
    print 'running deeplab on image %s...' % url
    resized_im, seg_map = model.run(orignal_im)
    vis_segmentation(resized_im, seg_map)
_ = interact(get_an_internet_image, url='')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Select and download models
Step5: Load model in TensorFlow
Step6: Helper methods
Step7: Run on sample images
Step8: Run on internet images
|
2,520
|
<ASSISTANT_TASK:>
Python Code:
from atmPy.aerosols.instruments.POPS import mie
%matplotlib inline
import matplotlib.pylab as plt
plt.rcParams['figure.dpi'] = 200
# --- Standard settings: Mie scattering signal vs particle diameter ----------
d,amp = mie.makeMie_diameter(noOfdiameters=1000)
f,a = plt.subplots()
a.plot(d,amp)
a.loglog()
a.set_xlim((0.1,3))
a.set_ylabel('Signal intensity (arb. u.)')
a.set_xlabel('Diameter ($\mu$m)')
# --- Wavelength dependence: compare 405 nm and 445 nm lasers ----------------
noofpoints = 1000
d,amp405 = mie.makeMie_diameter(noOfdiameters=noofpoints, WavelengthInUm=0.405)
d,amp445 = mie.makeMie_diameter(noOfdiameters=noofpoints, WavelengthInUm=0.445)
f,a = plt.subplots()
a.plot(d, amp405, label = '405')
a.plot(d, amp445, label = '445')
a.loglog()
# Diameter detection limits of interest (um) and their grid indices.
lim = [0.14, 3]
arglim = [abs(d - lim[0]).argmin(), abs(d - lim[1]).argmin()]
# arglim
# Scattering signal at those limits for the 405 nm curve.
scs_at_lim_405= amp405[arglim]
# scs_at_lim_405
#w the lower detection limit will go up to
# Diameter at which the 445 nm curve reaches the 405 nm lower-limit signal.
d[abs(amp445 - scs_at_lim_405[0]).argmin()]
# --- Refractive-index dependence: IOR = 1.4, 1.5, 1.6 -----------------------
nop = 1000
dI,ampI = mie.makeMie_diameter(noOfdiameters=nop, IOR=1.4)
dII,ampII = mie.makeMie_diameter(noOfdiameters=nop, IOR=1.5)
dIII,ampIII = mie.makeMie_diameter(noOfdiameters=nop, IOR=1.6)
f,a = plt.subplots()
a.plot(dI,ampI)
a.plot(dII,ampII)
a.plot(dIII,ampIII)
a.loglog()
a.set_xlim((0.1,3))
a.set_ylabel('Signal intensity (arb. u.)')
a.set_xlabel('Diameter ($\mu$m)')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: standard settings
Step2: Wavelength dependence
Step3: refractive index dependence
|
2,521
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import itertools
import pandas as pd
# In Python 2.7 the division of integers is not float. Do this to have 1 / 2 = .5
from __future__ import division
# Number of simulations
S = 1000
# Number of observations in each sample
N = [10, 100, 1000]
# True parameter values
theta = [.1, .3, .5]
def rejections(n, p, s):
    """Estimate rejection probabilities of two binomial interval statistics.

    Simulates ``s`` binomial experiments of ``n`` draws with success
    probability ``p`` and returns, for each of two asymptotic test
    statistics, the fraction of experiments whose statistic exceeds the
    two-sided 5% normal critical value 1.96.

    Parameters
    ----------
    n : int
        Number of draws per experiment.
    p : float
        True success probability (0 < p < 1).
    s : int
        Number of simulated experiments.

    Returns
    -------
    r1, r2 : float
        Rejection frequencies for the Wald statistic and for the
        arcsine (variance-stabilized) statistic, respectively.
    """
    # Draw s binomial counts and convert to success-probability estimates.
    X = np.random.binomial(n, p, s)
    theta_hat = X / n
    # Wald statistic |sqrt(n)(theta_hat - p)| / sqrt(theta_hat(1 - theta_hat)).
    # NOTE: theta_hat of exactly 0 or 1 makes the denominator zero; numpy
    # emits a warning and the resulting inf counts as a rejection.
    W1 = np.abs(n**.5 * (theta_hat - p) / (theta_hat * (1 - theta_hat))**.5)
    # Variance-stabilizing arcsine transform statistic.
    W2 = np.abs(2 * n**.5 * (np.arcsin(theta_hat**.5) - np.arcsin(p**.5)))
    # Fraction of |W| above the 5% two-sided critical value.
    r1 = np.mean(W1 > 1.96)
    r2 = np.mean(W2 > 1.96)
    return r1, r2
# Initialize containers for the index labels and the result rows.
index, df = [], []
# To avoid multiple loops, use cartesian product of all options to check.
for (n, p) in itertools.product(N, theta):
    r = rejections(n, p, S)
    # Append results to labels and data
    df.append(r)
    index.append([n, p])
# Construct multi-level (n, p) index
index = pd.MultiIndex.from_tuples(index, names=['n', 'p'])
# Construct pandas DataFrame for nice output
table = pd.DataFrame(df, columns=['r1', 'r2'], index=index)
# One column block per true p, rows indexed by sample size n.
print(table.unstack('p'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define parameters of the simulation
Step3: Define the function that returns rejection probabilities
Step4: Run simulation study
|
2,522
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
# Elements of A that also occur in B; order and duplicates of A are kept.
A = np.array([1, 1, 2, 3, 3, 3, 4, 5, 6, 7, 8, 8])
B = np.array([1, 2, 8])
# np.isin supersedes the legacy np.in1d (same semantics for 1-D input,
# clearer shape behaviour in general).
C = A[np.isin(A, B)]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,523
|
<ASSISTANT_TASK:>
Python Code:
# IPython magics: generate the visual/olfactory inputs and the
# integration-LPU configuration for the sensory-integration demo.
%cd -q ~/neurokernel/examples/sensory_int/data
%run gen_vis_input.py
%run gen_olf_input.py
%run gen_integrate.py
# Run the full demo and render its output video.
%cd -q ~/neurokernel/examples/sensory_int/
%run sensory_int_demo.py
%run visualize_output.py
# Embed the pre-rendered result video from YouTube.
import IPython.display
IPython.display.YouTubeVideo('e-eUOtOF9fc')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Once the input and the configuration are ready, we execute the entire model. Note that the interconnections between the integration LPU and both the antennal lobe and medulla LPUs are configured in the simulation script rather than in a GEXF file.
Step2: Next, we generate a video to show the final result
Step3: The resulting video can be viewed below
|
2,524
|
<ASSISTANT_TASK:>
Python Code:
# print() is a function: it takes any number of positional arguments and
# an optional separator keyword.
print('abc')
print(1, 2, 3)
print(1, 2, 3, sep='--')
def fibonacci(N):
    """Return a list of the first N Fibonacci numbers (1, 1, 2, 3, 5, ...)."""
    prev, curr = 0, 1
    result = []
    for _ in range(N):
        prev, curr = curr, prev + curr
        result.append(prev)
    return result
def real_imag_conj(val):
    """Return (real part, imaginary part, complex conjugate) of val."""
    conjugate = val.conjugate()
    return (val.real, val.imag, conjugate)
# A tuple return value unpacks directly into multiple variables.
r, i, c = real_imag_conj(3 + 4j)
print(r, i, c)
def fibonacci(N, a=0, b=1):
    """Return the first N terms of the Fibonacci-like sequence seeded by a, b."""
    terms = []
    for _ in range(N):
        a, b = b, a + b
        terms.append(a)
    return terms
# Defaults (classic Fibonacci), positional override, keyword override.
fibonacci(10)
fibonacci(10, 0, 2)
fibonacci(10, b=3, a=1)
def catch_all(*positional, **named):
    """Demonstrate arbitrary-argument capture: print positionals and keywords."""
    print("args =", positional)
    print("kwargs = ", named)
# Any mix of positional and keyword arguments is captured.
catch_all(1, 2, 3, a=4, b=5)
catch_all('a', keyword=2)
# A lambda is an anonymous single-expression function.
add = lambda x, y: x + y
add(1, 2)
# Equivalent named definition.
def add(x, y):
    return x + y
# Functions are objects: they can be passed as arguments, e.g. as sort keys.
data = [{'first':'Guido', 'last':'Van Rossum', 'YOB':1956},
        {'first':'Grace', 'last':'Hopper', 'YOB':1906},
        {'first':'Alan', 'last':'Turing', 'YOB':1912}]
sorted([2,4,3,5,1,6])
# sort alphabetically by first name
sorted(data, key=lambda item: item['first'])
# sort by year of birth
sorted(data, key=lambda item: item['YOB'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Aqui, print() es el nombre de la función, y 'abc' es lo que se llama un argumento (de la función).
Step2: Cuando se usan argumentos y argumentos con nombre, los argumentos con nombre siempre deben aparecer al final, pero en cualquier orden.
Step3: Ahora ya tenemos una función que se llama fibonacci y que toma un solo argumento llamado N, hace cosas incontables con este argumento, y retorna un valor. En este caso, la lista de los N primeros números de la serie de Fibonacci.
Step4: Para aquellos que sepan algo de lenguajes que son "tipeados" (a los que hay que asignar un tipo de datos a cada variable), verán que no hay información de los tipos asociados a los argumentos o valores de retorno de la función. Las funciones de Python pueden retornar cualquier tipo de objeto de Python, simple o complejo.
Step5: Valores por "omisión" de los argumentos
Step6: Con un solo argumento, el resultado de llamar a la función es idéntico al anterior
Step7: Pero ahora podemos usar la función para explorar otros valores
Step8: Si especificamos el nombre de los argumentos con clave, no es necesario el orden
Step9: *args and **kwargs
Step10: Noten que aqui los nombres args y kwargs no son importantes, sino los asteriscos
Step11: que es mas o menos equivalente a
Step12: Como todo es un objeto en Python, también podemos pasar funciones como argumento de otras funciones! Como ejemplo, tomemos algunos datos almacenados en una lista de diccionarios (!)
Step13: Supongamos que queremos ordenar la lista por año de nacimiento. Las listas efectivamente tienen un método sort
Step14: Pero los diccionarios no
|
2,525
|
<ASSISTANT_TASK:>
Python Code:
# Sets are created with curly braces; duplicates are discarded.
a_set = {1, 2, 3}
a_set
empty_set = set() # you have to use set() to create an empty set! (we will see why later)
print(empty_set)
a_set = {1, 2, 1, 1}
print(a_set)
# Sets are unordered: the printed order need not match insertion order.
a_set = {1, 3, 2}
print(a_set)
{1, 2, 3} == {2, 3, 1}
# Sets may mix immutable element types...
a_set = {1, 'a'}
print(a_set)
# ...but mutable elements are not allowed: this line raises TypeError.
a_set = {1, []}
# add() inserts a single element, in place.
a_set = set()
a_set.add(1)
print(a_set)
# add() returns None, so rebinding to its result loses the set.
a_set = set()
a_set = a_set.add(1)
print(a_set)
dir(set)
help(set.union)
# union() returns a new set with the elements of all given sets.
set1 = {1, 2, 3, 4, 5}
set2 = {4, 5, 6, 7, 8}
the_union = set1.union(set2)
print(the_union)
set1 = {1, 2, 3, 4, 5}
set2 = {4, 5, 6, 7, 8}
set3 = {5, 6, 7, 8, 9}
the_union = set1.union(set2, set3)
print(the_union)
help(set.intersection)
# intersection() returns only the elements common to all given sets.
set1 = {1, 2, 3, 4, 5}
set2 = {4, 5, 6, 7, 8}
the_intersection = set1.intersection(set2)
print(the_intersection)
set1 = {1, 2, 3, 4, 5}
set2 = {4, 5, 6, 7, 8}
set3 = {5, 8, 9, 10}
the_intersection = set1.intersection(set2, set3)
print(the_intersection)
# Sets are unordered, so indexing raises TypeError.
a_set = set()
a_set.add(1)
a_set.add(2)
a_set[0]
# Built-in functions work on sets just like on other collections.
nums = {3, 41, 12, 9, 74, 15}
print(len(nums)) # number of items in a set
print(max(nums)) # highest value in a set
print(min(nums)) # lowest value in a set
print(sum(nums)) # sum of all values in a set
set_a = {1, 2, 3}
set_b = {4, 5, 6}
an_element = 4
print(set_a)
#do some operations
set_a.add(an_element) # Add an_element to set_a
print(set_a)
set_a.update(set_b) # Add the elements of set_b to set_a
print(set_a)
set_a.pop() # Remove and return an arbitrary set element. How does this compare to the list method pop?
print(set_a)
set_a.remove(an_element) # Remove an_element from set_a
print(set_a)
dir(set)
set_1 = {'just', 'some', 'words'}
set_2 = {'some', 'other', 'words'}
# your code here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Curly brackets surround sets, and commas separate the elements in the set
Step2: Please note that sets are unordered. This means that it can occur that if you print a set, it looks different than how you created it
Step3: This also means that you can check if two sets are the same even if you don't know the order in which items were put in
Step4: Please note that sets can only contain immutable objects. Hence the following examples will work, since we are adding immutable objects
Step5: But the following example will result in an error, since we are trying to create a set with a mutable object
Step6: 2. How to add items to a set
Step7: 3. How to extract/inspect items in a set
Step8: You observe that there are many methods defined for sets! Here we explain the two most common methods. We start with the union method.
Step9: Python shows dots (...) for the parameters of the union method. Based on the docstring, we learn that we can provide any number of sets, and Python will return the union of them.
Step10: The intersection method has works in a similar manner as the union method, but returns a new set containing only the intersection of the sets.
Step11: Since sets are unordered, you can not use an index to extract an element from a set.
Step12: 4. Using built-in functions on sets
Step13: 5. An overview of set operations
Step14: Before diving into some exercises, you may want to the dir built-in function again to see an overview of all set methods
Step15: Exercises
|
2,526
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
# Toy data with one row (index 1) that is entirely missing.
raw_data = {'first_name': ['Jason', np.nan, 'Tina', 'Jake', 'Amy'],
            'last_name': ['Miller', np.nan, 'Ali', 'Milner', 'Cooze'],
            'age': [42, np.nan, 36, 24, 73],
            'sex': ['m', np.nan, 'f', 'm', 'f'],
            'preTestScore': [4, np.nan, np.nan, 2, 3],
            'postTestScore': [25, np.nan, np.nan, 62, 70]}
df = pd.DataFrame(raw_data, columns = ['first_name', 'last_name', 'age', 'sex', 'preTestScore', 'postTestScore'])
df
# Drop every row that contains at least one NaN.
df_no_missing = df.dropna()
df_no_missing
# Drop only rows where *all* cells are NaN.
df_cleaned = df.dropna(how='all')
df_cleaned
# Add a column that is entirely NaN.
df['location'] = np.nan
df
# Drop columns (axis=1) that contain only NaN.
df.dropna(axis=1, how='all')
# Keep only rows with at least five non-NaN values.
df.dropna(thresh=5)
# Replace every NaN with 0 (returns a copy; df unchanged).
df.fillna(0)
# Impute preTestScore with its column mean, in place.
df["preTestScore"].fillna(df["preTestScore"].mean(), inplace=True)
df
# Impute postTestScore with the per-sex mean, in place.
df["postTestScore"].fillna(df.groupby("sex")["postTestScore"].transform("mean"), inplace=True)
df
# Select the rows of df where age is not NaN and sex is not NaN
df[df['age'].notnull() & df['sex'].notnull()]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create dataframe with missing values
Step2: Drop missing observations
Step3: Drop rows where all cells in that row is NA
Step4: Create a new column full of missing values
Step5: Drop column if they only contain missing values
Step6: Drop rows that contain less than five observations
Step7: Fill in missing data with zeros
Step8: Fill in missing in preTestScore with the mean value of preTestScore
Step9: Fill in missing in postTestScore with each sex's mean value of postTestScore
Step10: Select some raws but ignore the missing data points
|
2,527
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
%matplotlib inline
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
# import all Shogun classes
from shogun import *
from matplotlib.patches import Ellipse
# a tool for visualisation
def get_gaussian_ellipse_artist(mean, cov, nstd=1.96, color="red", linewidth=3):
    """Return an Ellipse artist covering nstd standard deviations of a
    2D Gaussian with the given mean and covariance.

    Parameters
    ----------
    mean : array-like, shape (2,)
        Centre of the ellipse.
    cov : array-like, shape (2, 2)
        Covariance matrix (assumed symmetric).
    nstd : float
        Number of standard deviations to cover; 1.96 gives the ~95% region.
    color, linewidth
        Edge styling forwarded to the Ellipse.

    Returns
    -------
    matplotlib.patches.Ellipse
        Unfilled artist; add to an axis via add_artist().
    """
    # Eigen-decompose the covariance and order eigenpairs by decreasing
    # eigenvalue so the first eigenvector gives the ellipse orientation.
    # (numpy-qualified calls replace the original's reliance on the
    # %pylab star-import for eigh/arctan2/sqrt.)
    vals, vecs = numpy.linalg.eigh(cov)
    order = vals.argsort()[::-1]
    vals, vecs = vals[order], vecs[:, order]
    theta = numpy.degrees(numpy.arctan2(*vecs[:, 0][::-1]))
    # width and height are "full" axis lengths (diameters), not radii.
    width, height = 2 * nstd * numpy.sqrt(vals)
    return Ellipse(xy=mean, width=width, height=height, angle=theta,
                   edgecolor=color, fill=False, linewidth=linewidth)
# create mixture of three Gaussians
num_components=3
num_max_samples=100
gmm=GMM(num_components)
dimension=2
# set means (TODO interface should be to construct mixture from individuals with set parameters)
means=zeros((num_components, dimension))
means[0]=[-5.0, -4.0]
means[1]=[7.0, 3.0]
means[2]=[0, 0.]
[gmm.set_nth_mean(means[i], i) for i in range(num_components)]
# set covariances
covs=zeros((num_components, dimension, dimension))
covs[0]=array([[2, 1.3],[.6, 3]])
covs[1]=array([[1.3, -0.8],[-0.8, 1.3]])
covs[2]=array([[2.5, .8],[0.8, 2.5]])
[gmm.set_nth_cov(covs[i],i) for i in range(num_components)]
# set mixture coefficients, these have to sum to one (TODO these should be initialised automatically)
weights=array([0.5, 0.3, 0.2])
gmm.put('m_coefficients', weights)
# now sample from each component seperately first, then from the joint model
colors=["red", "green", "blue"]
for i in range(num_components):
    # draw a number of samples from current component and plot
    num_samples=int(rand()*num_max_samples)+1
    # emulate sampling from one component by putting all weight on it
    # (TODO fix interface of GMM to handle this)
    w=zeros(num_components)
    w[i]=1.
    gmm.put('m_coefficients', w)
    # sample and plot (TODO fix interface to have loop within)
    X=array([gmm.sample() for _ in range(num_samples)])
    plot(X[:,0], X[:,1], "o", color=colors[i])
    # draw 95% elipsoid for current component
    gca().add_artist(get_gaussian_ellipse_artist(means[i], covs[i], color=colors[i]))
_=title("%dD Gaussian Mixture Model with %d components" % (dimension, num_components))
# since we used a hack to sample from each component, restore the true weights
gmm.put('m_coefficients', weights)
# generate a grid over the full space and evaluate components PDF
resolution=100
Xs=linspace(-10,10, resolution)
Ys=linspace(-8,6, resolution)
pairs=asarray([(x,y) for x in Xs for y in Ys])
# cluster() returns per-component scores followed by the joint
# log-likelihood at index num_components (=3 here).
D=asarray([gmm.cluster(pairs[i])[3] for i in range(len(pairs))]).reshape(resolution,resolution)
figure(figsize=(18,5))
subplot(1,2,1)
pcolor(Xs,Ys,D)
xlim([-10,10])
ylim([-8,6])
title("Log-Likelihood of GMM")
subplot(1,2,2)
pcolor(Xs,Ys,exp(D))
xlim([-10,10])
ylim([-8,6])
_=title("Likelihood of GMM")
# sample and plot (TODO fix interface to have loop within)
X=array([gmm.sample() for _ in range(num_max_samples)])
plot(X[:,0], X[:,1], "o")
_=title("Samples from GMM")
def estimate_gmm(X, num_components):
    """Fit a GMM with the given number of components to data X via EM.

    X is an (n_samples, n_features) array; Shogun expects column-major
    feature matrices, hence the transpose.
    """
    train_feats = features(X.T)
    model = GMM(num_components)
    model.set_features(train_feats)
    model.train_em()
    return model
# Candidate model sizes for the EM fits below.
component_numbers=[2,3]
# plot true likelihood
D_true=asarray([gmm.cluster(pairs[i])[num_components] for i in range(len(pairs))]).reshape(resolution,resolution)
figure(figsize=(18,5))
subplot(1,len(component_numbers)+1,1)
pcolor(Xs,Ys,exp(D_true))
xlim([-10,10])
ylim([-8,6])
title("True likelihood")
# Fit and visualise an EM estimate for each candidate component count.
for n in range(len(component_numbers)):
    # TODO get rid of these hacks and offer nice interface from Shogun
    # learn GMM with EM
    gmm_est=estimate_gmm(X, component_numbers[n])
    # evaluate at a grid of points
    D_est=asarray([gmm_est.cluster(pairs[i])[component_numbers[n]] for i in range(len(pairs))]).reshape(resolution,resolution)
    # visualise densities
    subplot(1,len(component_numbers)+1,n+2)
    pcolor(Xs,Ys,exp(D_est))
    xlim([-10,10])
    ylim([-8,6])
    _=title("Estimated likelihood for EM with %d components"%component_numbers[n])
def visualise_gmm(gmm, color="blue"):
    """Draw a 95% covariance ellipse for every Gaussian component of the mixture."""
    for comp_idx in range(gmm.get_num_components()):
        gaussian = Gaussian.obtain_from_generic(gmm.get_component(comp_idx))
        artist = get_gaussian_ellipse_artist(gaussian.get_mean(), gaussian.get_cov(), color=color)
        gca().add_artist(artist)
# multiple runs to illustrate that random initialisation of EM matters
for _ in range(3):
    figure(figsize=(18,5))
    subplot(1, len(component_numbers)+1, 1)
    plot(X[:,0],X[:,1], 'o')
    visualise_gmm(gmm_est, color="blue")
    title("True components")
    for i in range(len(component_numbers)):
        gmm_est=estimate_gmm(X, component_numbers[i])
        subplot(1, len(component_numbers)+1, i+2)
        plot(X[:,0],X[:,1], 'o')
        visualise_gmm(gmm_est, color=colors[i])
        # TODO add a method to get likelihood of full model, retraining is inefficient
        likelihood=gmm_est.train_em()
        _=title("Estimated likelihood: %.2f (%d components)"%(likelihood,component_numbers[i]))
def cluster_and_visualise(gmm_est):
    """Hard-cluster the global training data X under the given GMM and
    scatter-plot each cluster in its own colour.

    Parameters
    ----------
    gmm_est : GMM
        Mixture model used both for assigning points and for the number
        of clusters.

    Notes
    -----
    Relies on the notebook globals X (data), colors (one colour per
    cluster) and the pylab plotting namespace.
    """
    # BUGFIX: the original read the component count from the global `gmm`
    # instead of the model actually passed in; use the parameter instead.
    num_comp = gmm_est.get_num_components()
    # Assign each point to the component with the highest score.
    # (Shogun's cluster() returns per-component scores followed by the
    # joint likelihood, hence the slice.)
    # TODO another hack here: Shogun should allow to pass multiple points
    # and return argmax indices directly.
    clusters = asarray([argmax(gmm_est.cluster(x)[:num_comp]) for x in X])
    # Scatter each cluster with its own colour.
    for i in range(num_comp):
        indices = clusters == i
        plot(X[indices, 0], X[indices, 1], 'o', color=colors[i])
# learn gmm again and compare clusterings under the true and estimated models
gmm_est=estimate_gmm(X, num_components)
figure(figsize=(18,5))
subplot(121)
cluster_and_visualise(gmm)
title("Clustering under true GMM")
subplot(122)
cluster_and_visualise(gmm_est)
_=title("Clustering under estimated GMM")
# Colour the data by its likelihood under each individual component.
figure(figsize=(18,5))
for comp_idx in range(num_components):
    subplot(1,num_components,comp_idx+1)
    # evaluated likelihood under current component
    # TODO Shogun should do the loop and allow to specify component indices to evaluate pdf for
    # TODO distribution interface should be the same everywhere
    component=Gaussian.obtain_from_generic(gmm.get_component(comp_idx))
    cluster_likelihoods=asarray([component.compute_PDF(X[i]) for i in range(len(X))])
    # normalise to [0, 1] for colour mapping
    cluster_likelihoods-=cluster_likelihoods.min()
    cluster_likelihoods/=cluster_likelihoods.max()
    # plot, coloured by likelihood value
    cm=get_cmap("jet")
    for j in range(len(X)):
        color = cm(cluster_likelihoods[j])
        plot(X[j,0], X[j,1] ,"o", color=color)
    title("Data coloured by likelihood for component %d" % comp_idx)
# compute cluster index for every point in space
D_est=asarray([gmm_est.cluster(pairs[i])[:num_components].argmax() for i in range(len(pairs))]).reshape(resolution,resolution)
# visualise clustering
cluster_and_visualise(gmm_est)
# visualise space partitioning
pcolor(Xs,Ys,D_est)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Gaussian Mixture Models and Expectation Maximisation in Shogun
Step2: Set up the model in Shogun
Step3: Sampling from mixture models
Step4: Evaluating densities in mixture Models
Step5: Density estimating with mixture models
Step6: Imagine you did not know the true generating process of this data. What would you think just looking at it? There are clearly at least two components (or clusters) that might have generated this data, but three also looks reasonable. So let us try to learn a Gaussian mixture model on those.
Step7: So far so good, now lets plot the density of this GMM using the code from above
Step8: It is also possible to access the individual components of the mixture distribution. In our case, we can for example draw 95% ellipses for each of the Gaussians using the method from above. We will do this (and more) below.
Step9: Clustering with mixture models
Step10: These are clusterings obtained via the true mixture model and the one learned via EM. There is a slight subtlety here
Step11: Note how the lower left and middle cluster are overlapping in the sense that points at their intersection have similar likelihoods. If you do not care at all about this and are just interested in a partitioning of the space, simply choose the maximum.
|
2,528
|
<ASSISTANT_TASK:>
Python Code:
from pscript import py2js, evalpy
# Transpile a string of Python source to JavaScript.
js = py2js('for i in range(10): print(i)')
print(js)
# Transpile an actual Python function object.
def foo(x):
    res = []
    for i in range(x):
        res.append(i**2)
    return res
js = py2js(foo)
print(js)
# Same function as a list comprehension (valid JS, though less readable).
def foo(x):
    return [i**2 for i in range(x)]
js = py2js(foo)
print(js)
# Classes are also supported...
class Bar:
    def spam(self):
        return 3 + 4
#js = py2js(Bar)
# This only works if Bar is defined in an actual module.
# evalpy() evaluates Python code in NodeJS and returns its output.
evalpy('print(3 + 4)')
evalpy('print(None)')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can transpile strings of Python code
Step2: Or actual Python functions
Step3: Let's try that again, but now with a list comprehension. (The JS is valid and will run fine, though its less readable.)
Step4: Classes are also supported, but not in the notebook (apparently Python cannot retrieve the source code in this case).
Step5: Using evalpy you can evaluate Python code in NodeJS
|
2,529
|
<ASSISTANT_TASK:>
Python Code:
#import pandas for conviently labelled arrays
import pandas
# import numpy for SVD function
import numpy
# import matplotlib.pyplot for visualising arrays
import matplotlib.pyplot as plt
# create a simple word-document matrix as a pandas dataframe, the content values have been normalised
words = ['wheel', ' seat', ' engine', ' slice', ' oven', ' boil']
print(words)
documents = ['doc1', 'doc2', 'doc3', 'doc4', 'doc5', 'doc6']
word_doc = pandas.DataFrame([[0.5,0.3333, 0.25, 0, 0, 0],
[0.25, 0.3333, 0, 0, 0, 0],
[0.25, 0.3333, 0.75, 0,0,0],
[0, 0, 0, 0.5, 0.5, 0.6],
[0, 0, 0, 0.3333, 0.1667, 0],
[0, 0, 0, 0.1667, 0.3333, 0.4]], index=words, columns=documents)
# and show it
word_doc
# create a numpy array from the pandas dataframe
A = word_doc.values
# break it down into an SVD
U, s, VT = numpy.linalg.svd(A, full_matrices=False)
S = numpy.diag(s)
# what are U, S and V
print("U =\n", numpy.round(U, decimals=2), "\n")
print("S =\n", numpy.round(S, decimals=2), "\n")
print("V^T =\n", numpy.round(VT, decimals=2), "\n")
# rebuild A2 from U.S.V
A2 = numpy.dot(U,numpy.dot(S,VT))
print("A2 =\n", numpy.round(A2, decimals=2))
# S_reduced is the same as S but with only the top 2 elements kept
S_reduced = numpy.zeros_like(S)
# only keep top two eigenvalues
l = 2
S_reduced[:l, :l] = S[:l,:l]
# show S_reduced which has less info than original S
print("S_reduced =\n", numpy.round(S_reduced, decimals=2))
# what is the document matrix now?
S_reduced_VT = numpy.dot(S_reduced, VT)
print("S_reduced_VT = \n", numpy.round(S_reduced_VT, decimals=2))
# plot the array
p = plt.subplot(111)
p.axis('scaled'); p.axis([-2, 2, -2, 2]); p.axhline(y=0, color='lightgrey'); p.axvline(x=0, color='lightgrey')
p.set_yticklabels([]); p.set_xticklabels([])
p.set_title("S_reduced_VT")
p.plot(S_reduced_VT[0,],S_reduced_VT[1,],'ro')
plt.show()
# topics are a linear combination of original words
U_S_reduced = numpy.dot(U, S_reduced)
df = pandas.DataFrame(numpy.round(U_S_reduced, decimals=2), index=words)
# show colour coded so it is easier to see significant word contributions to a topic
df.style.background_gradient(cmap=plt.get_cmap('Blues'), low=0, high=2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A Simple Word-Document Matrix
Step2: Word-Document Matrix is A
Step3: Now Take the SVD
Step4: We can see above that the values in the diagonal S matrix are ordered by magnitide. There is a significant different between the biggest value 1.1, and the smallest 0.05. The halfway value of 0.28 is still much smaller than the largest.
Step5: Yes, that worked .. the reconstructed A2 is the same as the original A (within the bounds of small floating point accuracy)
Step6: New View Of Documents
Step7: The above shows that there are 2 clusters of documents. That matches our expectations as we constructed the example data set that way.
|
2,530
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
# For each position (i, j), b[i, j] picks which entry of a[i, j, :] to keep.
a = np.array(
    [[[ 0, 1],
      [ 2, 3],
      [ 4, 5]],
     [[ 6, 7],
      [ 8, 9],
      [10, 11]],
     [[12, 13],
      [14, 15],
      [16, 17]]]
)
b = np.array(
    [[0, 1, 1],
     [1, 0, 1],
     [1, 1, 0]]
)
# take_along_axis selects per-element along the last axis; the trailing
# [..., 0] drops the now length-1 last dimension.
result = np.take_along_axis(a, b[..., np.newaxis], axis=-1)[..., 0]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,531
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
import pymc3 as pm
import matplotlib.pyplot as plt
# from pandas_datareader import data
# prices = data.GoogleDailyReader(symbols=['GLD', 'GFI'], end='2014-8-1').read().loc['Open', :, :]
prices = pd.read_csv(pm.get_data('stock_prices.csv'))
prices['Date'] = pd.DatetimeIndex(prices['Date'])
prices = prices.set_index('Date')
prices.head()
finite_idx = (np.isfinite(prices.GLD.values)) & (np.isfinite(prices.GFI.values))
prices = prices.iloc[finite_idx]
fig = plt.figure(figsize=(9, 6))
ax = fig.add_subplot(111, xlabel='Price GFI in \$', ylabel='Price GLD in \$')
colors = np.linspace(0.1, 1, len(prices))
mymap = plt.get_cmap("winter")
sc = ax.scatter(prices.GFI, prices.GLD, c=colors, cmap=mymap, lw=0)
cb = plt.colorbar(sc)
cb.ax.set_yticklabels([str(p.date()) for p in prices[::len(prices)//10].index]);
with pm.Model() as model_reg:
pm.glm.GLM.from_formula('GLD ~ GFI', prices)
trace_reg = pm.sample(2000)
fig = plt.figure(figsize=(9, 6))
ax = fig.add_subplot(111, xlabel='Price GFI in \$', ylabel='Price GLD in \$',
title='Posterior predictive regression lines')
sc = ax.scatter(prices.GFI, prices.GLD, c=colors, cmap=mymap, lw=0)
pm.plot_posterior_predictive_glm(trace_reg[100:], samples=100,
label='posterior predictive regression lines',
lm=lambda x, sample: sample['Intercept'] + sample['GFI'] * x,
eval=np.linspace(prices.GFI.min(), prices.GFI.max(), 100))
cb = plt.colorbar(sc)
cb.ax.set_yticklabels([str(p.date()) for p in prices[::len(prices)//10].index]);
ax.legend(loc=0);
model_randomwalk = pm.Model()
with model_randomwalk:
# std of random walk, best sampled in log space.
sigma_alpha = pm.Exponential('sigma_alpha', 1./.02, testval = .1)
sigma_beta = pm.Exponential('sigma_beta', 1./.02, testval = .1)
import theano.tensor as tt
# To make the model simpler, we will apply the same coefficient for 50 data points at a time
subsample_n = 50
lendata = len(prices)
ncoef = lendata // subsample_n
idx = range(ncoef * subsample_n)
with model_randomwalk:
alpha = pm.GaussianRandomWalk('alpha', sigma_alpha**-2,
shape=ncoef)
beta = pm.GaussianRandomWalk('beta', sigma_beta**-2,
shape=ncoef)
# Make coefficients have the same length as prices
alpha_r = tt.repeat(alpha, subsample_n)
beta_r = tt.repeat(beta, subsample_n)
with model_randomwalk:
# Define regression
regression = alpha_r + beta_r * prices.GFI.values[idx]
# Assume prices are Normally distributed, the mean comes from the regression.
sd = pm.Uniform('sd', 0, 20)
likelihood = pm.Normal('y',
mu=regression,
sd=sd,
observed=prices.GLD.values[idx])
with model_randomwalk:
trace_rw = pm.sample(2000, njobs=2)
fig = plt.figure(figsize=(8, 6))
ax = plt.subplot(111, xlabel='time', ylabel='alpha', title='Change of alpha over time.')
ax.plot(trace_rw[-1000:]['alpha'].T, 'r', alpha=.05);
ax.set_xticklabels([str(p.date()) for p in prices[::len(prices)//5].index]);
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, xlabel='time', ylabel='beta', title='Change of beta over time')
ax.plot(trace_rw[-1000:]['beta'].T, 'b', alpha=.05);
ax.set_xticklabels([str(p.date()) for p in prices[::len(prices)//5].index]);
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, xlabel='Price GFI in \$', ylabel='Price GLD in \$',
title='Posterior predictive regression lines')
colors = np.linspace(0.1, 1, len(prices))
colors_sc = np.linspace(0.1, 1, len(trace_rw[-500::10]['alpha'].T))
mymap = plt.get_cmap('winter')
mymap_sc = plt.get_cmap('winter')
xi = np.linspace(prices.GFI.min(), prices.GFI.max(), 50)
for i, (alpha, beta) in enumerate(zip(trace_rw[-500::10]['alpha'].T, trace_rw[-500::10]['beta'].T)):
for a, b in zip(alpha, beta):
ax.plot(xi, a + b*xi, alpha=.05, lw=1, c=mymap_sc(colors_sc[i]))
sc = ax.scatter(prices.GFI, prices.GLD, label='data', cmap=mymap, c=colors)
cb = plt.colorbar(sc)
cb.ax.set_yticklabels([str(p.date()) for p in prices[::len(prices)//10].index]);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lets load the prices of GFI and GLD.
Step2: Plotting the prices over time suggests a strong correlation. However, the correlation seems to change over time.
Step3: A naive approach would be to estimate a linear model and ignore the time domain.
Step4: The posterior predictive plot shows how bad the fit is.
Step5: Rolling regression
Step6: Next, we define the regression parameters that are not a single random variable but rather a random vector with the above stated dependence structure. So as not to fit a coefficient to a single data point, we will chunk the data into bins of 50 and apply the same coefficients to all data points in a single bin.
Step7: Perform the regression given coefficients and data and link to the data via the likelihood.
Step8: Inference. Despite this being quite a complex model, NUTS handles it wells.
Step9: Analysis of results
Step10: However, the slope does.
Step11: The posterior predictive plot shows that we capture the change in regression over time much better. Note that we should have used returns instead of prices. The model would still work the same, but the visualisations would not be quite as clear.
|
2,532
|
<ASSISTANT_TASK:>
Python Code:
def execute_notebook(nbfile):
    """Run every code cell of a notebook file inside the current kernel.

    Uses the legacy IPython.nbformat 'current' reader; non-code cells
    are skipped.
    """
    with io.open(nbfile) as f:
        nb = current.read(f, 'json')
    ip = get_ipython()
    for cell in nb.worksheets[0].cells:
        if cell.cell_type != 'code':
            continue
        ip.run_cell(cell.input)
#execute_notebook("PORCENTAGENS ESTADOS.ipynb")
def read_csv2(filename):
    """Read a semicolon-delimited CSV file into a list of row dicts.

    The file is decoded as UTF-8 and undecodable bytes are dropped
    (errors='ignore'), matching the original manual per-line decode.

    Parameters
    ----------
    filename : str
        Path of the CSV file to read.

    Returns
    -------
    list of dict
        One dict per data row, keyed by the header row.
    """
    # Text mode with newline='' is the csv-module recommended way to read;
    # it replaces the binary-mode open + manual decode of the original.
    with open(filename, encoding="utf8", errors="ignore", newline="") as f:
        reader = csv.DictReader(f, delimiter=";")
        return list(reader)
# Load the raw candidate records and preview them as a DataFrame.
Unsorted_allCandidates = read_csv2("CANDIDATOS_NOTA_NOME_CERTO.csv")
Unsorted_allCandidates[0:50]
display3 = pd.DataFrame(Unsorted_allCandidates)
display3
def make_date(date):
    """Parse a 'dd/mm/yyyy' string into a datetime; '' maps to None."""
    if date != '':
        return dt.strptime(date, '%d/%m/%Y')
    return None
def make_boolean(data):
    """Map the Portuguese strings 'SIM'/'NÃO' to True/False; anything else yields None."""
    return {"SIM": True, "NÃO": False}.get(data)
def make_float(data):
    """Convert *data* to float; the empty string maps to 0."""
    return 0 if data == "" else float(data)
def make_int(data):
    """Convert *data* to int; the empty string maps to 0."""
    return 0 if data == "" else int(data)
def tipo_cota_clean(data):
    """Normalise the quota-type field: the empty string becomes None, others pass through."""
    return None if data == "" else data
def make_str(data):
    """Coerce *data* to str, leaving the empty string untouched."""
    return data if data == "" else str(data)
def make_upper(data):
    """Return *data* upper-cased."""
    return str.upper(data)
# Peek at one raw record before cleaning.
Unsorted_allCandidates[0]
# (Disabled) helper that would replace missing scores with random integers.
#def make_random_int(data):
#    if data =="":
#        return random.randint(0 , 1000)
#    else:
#        return float(data)
# Normalise every field of every candidate record in place:
# dates, booleans, quota type, score, ENEM year, and the (BOM-prefixed) name column.
for i in Unsorted_allCandidates:
    i["nascimento"] = make_date(i["nascimento"])
    i["surdo"] = make_boolean(i["surdo"])
    i["Cota"] = make_boolean(i["Cota"])
    i["Tipo cota"] = tipo_cota_clean(i["Tipo cota"])
    i["Pont"] = make_float(i["Pont"])
    i["Ano ENEM"] = make_int(i["Ano ENEM"])
    i["\ufeffNome"] = make_str(i["\ufeffNome"])
    i["\ufeffNome"] = make_upper(i["\ufeffNome"])
    #i["Pont"] = make_random_int(i["Pont"])
len(Unsorted_allCandidates)
def polo_selector(polo, data):
    """Keep only candidates whose 'Polo desejado' equals *polo*.

    Mutates *data* in place and also returns it, matching the original contract.
    """
    data[:] = [candidate for candidate in data if candidate["Polo desejado"] == polo]
    return data
# Keep only candidates applying to the selected campus ("polo"), then
# rank them by score ("Pont"), highest first.
polo_selector(polo, Unsorted_allCandidates)
allCandidates = sorted(Unsorted_allCandidates, key=lambda element: element["Pont"], reverse=True)
Display_allCandidatas = pd.DataFrame(allCandidates)
Display_allCandidatas[:50]
def is_surdo(data, field, surdo, ouvinte):
    """Route *data* into the *surdo* list when data[field] is truthy, else into *ouvinte*.

    Returns None (the return value of list.append), matching the original contract.
    """
    # BUG FIX: the original appended the global loop variable `i` instead of
    # the `data` argument, which only worked by accident when the caller
    # happened to pass its loop variable `i`. Append the argument itself.
    if data[field]:
        return surdo.append(data)
    else:
        return ouvinte.append(data)
# Split the ranked list into deaf ("surdos") and hearing ("ouvintes") candidates.
candidatos_surdos = list()
candidatos_ouvintes = list()
for i in allCandidates:
    is_surdo(i, "surdo", candidatos_surdos, candidatos_ouvintes)
len(candidatos_ouvintes)
len(candidatos_surdos)
# Sanity check: the two partitions must add up to the full candidate list.
print ("CHECK - Total de Candidatos = ", (len(candidatos_ouvintes) + len(candidatos_surdos)))
# Cross-checks: no deaf candidate in the hearing list and vice versa.
for i in candidatos_ouvintes:
    if i["surdo"]:
        print ("ERROR = ", i)
for i in candidatos_surdos:
    if not i["surdo"]:
        print ("ERROR = ", i)
# One bucket per quota letter; ampla_con holds open-competition candidates.
ampla_con, cota_A, cota_B, cota_C, cota_D, cota_E, cota_F, cota_G, cota_H = list(), list(), list(), list(), list(), list(), list(), list(), list()
def cotas(data, field):
    """Dispatch each candidate into the module-level quota lists by its *field* value.

    Fills ``ampla_con`` (value None) and ``cota_A`` .. ``cota_H``, and returns
    the per-bucket counts as a 10-tuple in the order
    (AMPLA, A, B, C, D, E, F, G, H, unrecognised).
    """
    buckets = [(None, ampla_con), ("A", cota_A), ("B", cota_B), ("C", cota_C),
               ("D", cota_D), ("E", cota_E), ("F", cota_F), ("G", cota_G),
               ("H", cota_H)]
    table = dict(buckets)
    counts = {key: 0 for key, _ in buckets}
    unrecognised = 0
    for candidate in data:
        value = candidate[field]
        if value in table:
            counts[value] += 1
            table[value].append(candidate)
        else:
            unrecognised += 1
    return (counts[None], counts["A"], counts["B"], counts["C"], counts["D"],
            counts["E"], counts["F"], counts["G"], counts["H"], unrecognised)
# Fill the quota buckets from the full ranked list.
cotas(allCandidates, "Tipo cota")
# Sanity check: bucket sizes must add up to the candidate total.
print ((len(cota_A)) +\
    (len(cota_B))+\
    (len(cota_C))+\
    (len(cota_D))+\
    (len(cota_E))+\
    (len(cota_F))+\
    (len(cota_G))+\
    (len(cota_H))+\
    (len(ampla_con)))
# Per-bucket sizes for inspection.
print("COTA A =",len(cota_A),\
    "COTA B =",len(cota_B),\
    "COTA C =",len(cota_C),\
    "COTA D =",len(cota_D),\
    "COTA E =",len(cota_E),\
    "COTA F =",len(cota_F),\
    "COTA G =",len(cota_G),\
    "COTA H =",len(cota_H),\
    "Ampla =",len(ampla_con))
def clear_aprovados(data, check):
    """Remove from *data*, in place, every element also present in *check*.

    Prints the number of elements removed; returns None.
    """
    remaining = [element for element in data if element not in check]
    print(len(data) - len(remaining))
    data[:] = remaining
# The top 14 ranked deaf candidates are approved outright.
surdos_aprovados = candidatos_surdos[:14]
print (len(surdos_aprovados))
print (len(allCandidates))
Alunos_Aprovados_Finais = list()
# Remove the approved deaf candidates from the general ranked list.
clear_aprovados(allCandidates, surdos_aprovados)
len(allCandidates)
Display_allCandidatas[:30]
Display_surdos_aprovados = pd.DataFrame(surdos_aprovados)
Display_surdos_aprovados
# Snapshot of the initial seat counts per quota (A..H, Ampla) so that
# consumption can be reported later as e.g. Ac - A.
# NOTE(review): A..H and Ampla are assumed to be defined earlier in the
# notebook with the number of seats per quota — not visible in this chunk.
Ac = A
Bc = B
Cc = C
Dc = D
Ec = E
Fc = F
Gc = G
Hc = H
Amplac = Ampla
print(A, B, C, D, E, F, G, H, Ampla)
# Seat counters: quota/open seats taken by deaf and by hearing candidates.
counter_vagas = 0
counter_vagas_nao_cota = 0
counter_vagas_ouvintes = 0
counter_vagas_ouvintes_nao_cota = 0
# FIRST PHASE - take every approved deaf candidate and apply the quota
# rules first, then open competition ("ampla concorrência").
# Each quota letter follows the same pattern: while seats remain (X > 0),
# move the candidate into the final approved list, take them out of the
# quota bucket, and consume one seat.
for i in surdos_aprovados[:]:
    if i in cota_A[:]:
        if A > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_A.remove(i)
            A -= 1
            counter_vagas += 1
print ("A = ", A)
for i in surdos_aprovados[:]:
    if i in cota_B[:]:
        if B > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_B.remove(i)
            B -= 1
            counter_vagas += 1
print ("B = ", B)
for i in surdos_aprovados[:]:
    if i in cota_C[:]:
        if C > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_C.remove(i)
            C -= 1
            counter_vagas += 1
print ("C = ", C)
for i in surdos_aprovados[:]:
    if i in cota_D[:]:
        if D > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_D.remove(i)
            D -= 1
            counter_vagas += 1
print ("D = ", D)
for i in surdos_aprovados[:]:
    if i in cota_E[:]:
        if E > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_E.remove(i)
            E -= 1
            counter_vagas += 1
print ("E = ", E)
for i in surdos_aprovados[:]:
    if i in cota_F[:]:
        if F > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_F.remove(i)
            F -= 1
            counter_vagas += 1
print ("F = ", F)
for i in surdos_aprovados[:]:
    if i in cota_G[:]:
        if G > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_G.remove(i)
            G -= 1
            counter_vagas += 1
print ("G = ", G)
for i in surdos_aprovados[:]:
    if i in cota_H[:]:
        if H > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_H.remove(i)
            H -= 1
            counter_vagas += 1
print ("H = ", H)
# Deaf candidates in the open-competition bucket consume "Ampla" seats.
for i in surdos_aprovados[:]:
    if i in ampla_con[:]:
        if Ampla > 0:
            Alunos_Aprovados_Finais.append(i)
            ampla_con.remove(i)
            Ampla -= 1
            counter_vagas_nao_cota += 1
print ("Ampla = ", Ampla)
# SECOND PHASE - place every remaining approved deaf candidate (still stuck
# in a quota bucket with no quota seats left) into the open competition
# ("Ampla") and remove them from their original quota list.
for i in surdos_aprovados[:]:
    if i in cota_A[:]:
        if Ampla > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_A.remove(i)
            Ampla -= 1
            counter_vagas_nao_cota += 1
print ("Ampla = ", Ampla)
for i in surdos_aprovados[:]:
    if i in cota_B[:]:
        if Ampla > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_B.remove(i)
            Ampla -= 1
            counter_vagas_nao_cota += 1
print ("Ampla = ", Ampla)
for i in surdos_aprovados[:]:
    if i in cota_C[:]:
        if Ampla > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_C.remove(i)
            Ampla -= 1
            counter_vagas_nao_cota += 1
print ("Ampla = ", Ampla)
for i in surdos_aprovados[:]:
    if i in cota_D[:]:
        if Ampla > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_D.remove(i)
            Ampla -= 1
            counter_vagas_nao_cota += 1
print ("Ampla = ", Ampla)
for i in surdos_aprovados[:]:
    if i in cota_E[:]:
        if Ampla > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_E.remove(i)
            Ampla -= 1
            counter_vagas_nao_cota += 1
print ("Ampla = ", Ampla)
for i in surdos_aprovados[:]:
    if i in cota_F[:]:
        if Ampla > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_F.remove(i)
            Ampla -= 1
            counter_vagas_nao_cota += 1
print ("Ampla = ", Ampla)
for i in surdos_aprovados[:]:
    if i in cota_G[:]:
        if Ampla > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_G.remove(i)
            Ampla -= 1
            counter_vagas_nao_cota += 1
print ("Ampla = ", Ampla)
for i in surdos_aprovados[:]:
    if i in cota_H[:]:
        if Ampla > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_H.remove(i)
            Ampla -= 1
            counter_vagas_nao_cota += 1
print ("Ampla = ", Ampla)
# Report how many seats of each quota were consumed by deaf candidates
# (initial snapshot minus current count) and what remains for hearing candidates.
print("COTAS preenchidas por surdos - ",\
    "A =",Ac - A,\
    "B =",Bc - B,\
    "C =",Cc - C,\
    "D =",Dc - D,\
    "E =",Ec - E,\
    "F =",Fc - F,\
    "G =",Gc - G,\
    "H =",Hc - H,\
    "Ampla =",Amplac - Ampla)
print("SOBRA DE COTAS para ouvintes - " , "A =",A,\
    "B =",B,\
    "C =",C,\
    "D =",D,\
    "E =",E,\
    "F =",F,\
    "G =",G,\
    "H =",H,\
    "Ampla =",Ampla)
print("Total de vagas de COTAS preenchidas por SURDOS = ",counter_vagas)
print("Total de vagas de AMPLA preenchidas por SURDOS = ",counter_vagas_nao_cota)
# FIRST PHASE (hearing candidates) - seat the hearing ("OUVINTES") candidates
# into whatever quota seats the deaf candidates left over, same pattern as above.
for i in candidatos_ouvintes[:]:
    if i in cota_A[:]:
        if A > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_A.remove(i)
            A -= 1
            counter_vagas_ouvintes += 1
print ("A = ", A)
for i in candidatos_ouvintes[:]:
    if i in cota_B[:]:
        if B > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_B.remove(i)
            B -= 1
            counter_vagas_ouvintes += 1
print ("B = ", B)
for i in candidatos_ouvintes[:]:
    if i in cota_C[:]:
        if C > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_C.remove(i)
            C -= 1
            counter_vagas_ouvintes += 1
print ("C = ", C)
for i in candidatos_ouvintes[:]:
    if i in cota_D[:]:
        if D > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_D.remove(i)
            D -= 1
            counter_vagas_ouvintes += 1
print ("D = ", D)
for i in candidatos_ouvintes[:]:
    if i in cota_E[:]:
        if E > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_E.remove(i)
            E -= 1
            counter_vagas_ouvintes += 1
print ("E = ", E)
for i in candidatos_ouvintes[:]:
    if i in cota_F[:]:
        if F > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_F.remove(i)
            F -= 1
            counter_vagas_ouvintes += 1
print ("F = ", F)
for i in candidatos_ouvintes[:]:
    if i in cota_G[:]:
        if G > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_G.remove(i)
            G -= 1
            counter_vagas_ouvintes += 1
print ("G = ", G)
for i in candidatos_ouvintes[:]:
    if i in cota_H[:]:
        if H > 0:
            Alunos_Aprovados_Finais.append(i)
            cota_H.remove(i)
            H -= 1
            counter_vagas_ouvintes += 1
print ("H = ", H)
# Fill the remaining open-competition ("Ampla") seats with hearing candidates
# that were not already approved through a quota; tag them as "Ampla".
for i in candidatos_ouvintes[:]:
    if i not in Alunos_Aprovados_Finais[:]:
        if Ampla > 0:
            Alunos_Aprovados_Finais.append(i)
            Ampla -= 1
            counter_vagas_ouvintes_nao_cota += 1
            i["Tipo cota"] = "Ampla"
print ("Ampla = ", Ampla)
# BUG FIX: these two summary lines previously printed the *deaf* candidates'
# counters (counter_vagas / counter_vagas_nao_cota) under the OUVINTES label;
# report the hearing candidates' counters instead.
print("Total de vagas de COTAS preenchidas por OUVINTES = ", counter_vagas_ouvintes)
print("Total de vagas de AMPLA preenchidas por OUVINTES = ", counter_vagas_ouvintes_nao_cota)
# Rank the full approved list by score, highest first, for display.
Alunos_Aprovados_Finais_Ordenado = sorted(Alunos_Aprovados_Finais, key=lambda element: element["Pont"], reverse=True)
Display_Alunos_Aprovados_Finais = pd.DataFrame(Alunos_Aprovados_Finais_Ordenado)
Display_Alunos_Aprovados_Finais
ouvintes_aprovados_lista_1 = list()
def list_divide(data_in, data_out, field):
    """Append to *data_out* every row of *data_in* whose *field* equals False.

    The ``== False`` comparison is deliberate: it excludes rows where the
    field is None (unknown), unlike a plain truthiness test. Returns *data_out*.
    """
    data_out.extend(row for row in data_in if row[field] == False)
    return data_out
# Extract the approved hearing candidates (surdo == False) for display.
list_divide(Alunos_Aprovados_Finais_Ordenado, ouvintes_aprovados_lista_1, "surdo")
Display_Ouvintes_aprovados = pd.DataFrame(ouvintes_aprovados_lista_1)
#Display_Ouvintes_aprovados
#Display_surdos_aprovados
Display_Alunos_Aprovados_Finais
# (Disabled) early Excel export, superseded by the parameterised export below.
#export_Alunos_Aprovados = pd.ExcelWriter("Lista_01_POLO_INES.xlsx" , engine = 'xlsxwriter')
#Display_Alunos_Aprovados_Finais.to_excel(export_Alunos_Aprovados, index=False, sheet_name="Lista_01_INES")
#export_Alunos_Aprovados.save()
def write_csv(outputname, listname):
    """Write *listname* (a list of row dicts) to *outputname* as ';'-delimited CSV.

    Only the three fixed columns are written (no header row), in this order:
    the BOM-prefixed name column, CPF, and Classificação. Returns None.
    """
    # FIX: the csv module requires newline='' on the file object (otherwise
    # extra blank lines appear on Windows), and an explicit encoding is needed
    # for the non-ASCII field name "Classificação" to round-trip reliably.
    fieldnames = ["\ufeffNome", "CPF", "Classificação"]
    with open(outputname, "w", newline="", encoding="utf-8") as output:
        writer = csv.DictWriter(output, fieldnames=fieldnames, delimiter=";")
        return writer.writerows(listname)
# Build the reduced export list: keep only name, CPF and a classification rank.
# NOTE(review): [:] is a *shallow* copy — the del statements below mutate the
# same dicts referenced by Alunos_Aprovados_Finais_Ordenado.
Alunos_aprovados_finais_MSTECH = Alunos_Aprovados_Finais_Ordenado[:]
for i in Alunos_aprovados_finais_MSTECH[:]:
    del i["RG"]
    del i["nascimento"]
    del i["Ano ENEM"]
    del i["Pont"]
    del i["surdo"]
    del i["Cota"]
    del i["Tipo cota"]
    del i["ENEM"]
    del i["Polo desejado"]
# Assign the 1-based classification rank in score order.
for index, value in enumerate(Alunos_aprovados_finais_MSTECH[:], 1):
    value["Classificação"] = index
# Deep copy for the public list, then mask the first 6 CPF digits with 'X'.
Alunos_Aprovados_TI = copy.deepcopy(Alunos_aprovados_finais_MSTECH)
for i in Alunos_Aprovados_TI:
    list_str = list(i["CPF"])
    list_str[0], list_str[1], list_str[2], list_str[3], list_str[4], list_str[5] = "X", "X", "X", "X", "X", "X"
    i["CPF"] = "".join(list_str)
# Move the last column (Classificação) to the front for display.
Display_LISTA_TI = pd.DataFrame(Alunos_Aprovados_TI)
cols = Display_LISTA_TI.columns.tolist()
cols
cols = cols[-1:] + cols[:-1]
cols
Display_LISTA_TI = Display_LISTA_TI[cols]
Display_Alunos_Aprovados_Finais
Alunos_aprovados_finais_MSTECH
Display_LISTA_TI
# Output file names, parameterised by the selected campus ("polo").
name01 = "#ALUNOS_01_" + polo + ".xlsx"
name02 = "#MS_" + polo + ".csv"
name03 = "#DIVULGACAO_" + polo + ".xlsx"
print (name01)
print (name02)
print (name03)
# Export: full internal list (xlsx), MSTECH list (csv), public masked list (xlsx).
export_Alunos_Aprovados = pd.ExcelWriter(name01, engine='xlsxwriter')
Display_Alunos_Aprovados_Finais.to_excel(export_Alunos_Aprovados, index=False, sheet_name="Lista_01")
export_Alunos_Aprovados.save()
write_csv(name02, Alunos_aprovados_finais_MSTECH)
export_Alunos_Aprovados = pd.ExcelWriter(name03, engine='xlsxwriter')
Display_LISTA_TI.to_excel(export_Alunos_Aprovados, index=False, sheet_name="Lista_01")
export_Alunos_Aprovados.save()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Notebook Execute
Step2: plot_graph(Cotistas , "Pretos Pardos e Indígenas")
Step3: IMPORT ALL CANDIDATES CSV
Step4: Get CPFs
Step5: DELETAR
Step6: FILTRO DE POLO!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Step7: SORT BY SCORE
Step8: separacao = list()
Step9: CHECK LISTS SPLIT
Step10: Divisão das cotas
Step11: NUMERO DE COTISTAS
Step12: CHECK SPLIT COTAS
Step13: LIMPAR APROVADOS DA LISTA GERAL
Step14: Definição dos surdos aprovados
Step15: Criação dos aprovados finais
Step16: Exclui surdos aprovados da lista geral
Step17: !!!!!!!DEFINIÇÃO DO NUMERO DE VAGAS POR COTA!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Step18: Selecionador de vagas para surdos
Step19: SELECIONADOR DE VAGAS DOS OUVINTES
Step20: ORDENA POR NOTA
Step21: Divisão da Lista
Step22: LISTA PARA MSTECH
|
2,533
|
<ASSISTANT_TASK:>
Python Code:
# Connect to the ESGF federated search API (distributed search across data nodes).
from pyesgf.search import SearchConnection
conn = SearchConnection('https://esgf-data.dkrz.de/esg-search', distrib=True)
# Search context: monthly near-surface air temperature ('tas') from one
# CMIP6 UKESM1-0-LL historical run, restricted to a single CEDA data node.
ctx = conn.new_context(
    project='CMIP6',
    source_id='UKESM1-0-LL',
    experiment_id='historical',
    variable='tas',
    frequency='mon',
    variant_label='r1i1p1f2',
    data_node='esgf-data3.ceda.ac.uk')
ctx.hit_count
result = ctx.search()[0]
result.dataset_id
# List the OPeNDAP endpoints of the dataset's files.
files = result.file_context().search()
for file in files:
    print(file.opendap_url)
import xarray as xr
# Open the first file lazily over OPeNDAP, chunked in 120-month (decade) blocks.
ds = xr.open_dataset(files[0].opendap_url, chunks={'time': 120})
print(ds)
# Subset: first time step, lat -50..50, lon 0..50, then plot.
da = ds['tas']
da = da.isel(time=slice(0, 1))
da = da.sel(lat=slice(-50, 50), lon=slice(0, 50))
%matplotlib inline
da.plot()
# Concatenate the first two files along time into one aggregated dataset.
ds_agg = xr.open_mfdataset([files[0].opendap_url, files[1].opendap_url], chunks={'time': 120}, combine='nested', concat_dim='time')
print(ds_agg)
da = ds_agg['tas']
da = da.isel(time=slice(1200, 1201))
da = da.sel(lat=slice(-50, 50), lon=slice(0, 50))
da.plot()
# Persist the subset locally as NetCDF.
da.to_netcdf('tas_africa_19500116.nc')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Subset single dataset with xarray
Step2: Subset over multiple datasets
Step3: Download dataset
|
2,534
|
<ASSISTANT_TASK:>
Python Code:
# Install tensorflow-hub 0.7.0 only if it is not already present.
!pip freeze | grep tensorflow-hub==0.7.0 || pip install tensorflow-hub==0.7.0
import os
import tensorflow as tf
import tensorflow_hub as hub
PROJECT = "your-gcp-project-here" # REPLACE WITH YOUR PROJECT NAME
BUCKET = "your-gcp-bucket-here" # REPLACE WITH YOUR BUCKET NAME
os.environ["PROJECT"] = PROJECT
os.environ["BUCKET"] = BUCKET
%%bash
# Download the training corpus (a surgery manual from Project Gutenberg)
# and copy it to the GCS bucket used by the embedding pipeline.
URL=http://www.gutenberg.org/cache/epub/24564/pg24564.txt
OUTDIR=gs://$BUCKET/custom_embedding
CORPUS=surgery_manual.txt
curl $URL > $CORPUS
gsutil cp $CORPUS $OUTDIR/$CORPUS
# Paths to paste into the pipeline UI / TensorBoard.
!echo gs://$BUCKET/custom_embedding/surgery_manual.txt
!echo gs://$BUCKET/custom_embedding
!echo tensorboard --port 8080 --logdir gs://$BUCKET/custom_embedding/embeddings
# Wrap the exported TF-Hub module as a Keras layer and embed sample phrases.
MODULE = "gs://{bucket}/custom_embedding/hub-module".format(bucket=BUCKET)
MODULE
med_embed = hub.KerasLayer(MODULE)
outputs = med_embed(tf.constant(['ilium', 'I have a fracture', 'aneurism']))
outputs
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Replace by your GCP project and bucket
Step2: Setting up the Kubeflow cluster
Step3: It has very specialized language such as
Step 1
Step4: and for gcs-directory-path-for-pipeline-output which we will set to
Step5: Remark
Step6: The projector view will present you with a representation of the word vectors in a 3 dimensional space (the dim is reduced through PCA) that you can interact with. Enter in the search tool a few words like "ilium" and points in the 3D space will light up.
Step 7
Step7: Now we are ready to create a KerasLayer out of our custom text embedding.
Step8: That layer when called with a list of sentences will create a sentence vector for each sentence by averaging the word vectors of the sentence.
|
2,535
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.graphics.api import qqplot
print(sm.datasets.sunspots.NOTE)
# Yearly sunspot activity, re-indexed by calendar year (1700-2008).
dta = sm.datasets.sunspots.load_pandas().data
dta.index = pd.Index(sm.tsa.datetools.dates_from_range('1700', '2008'))
del dta["YEAR"]
dta.plot(figsize=(12,8));
# ACF / PACF of the raw series to guide AR-order selection.
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(dta.values.squeeze(), lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(dta, lags=40, ax=ax2)
# Fit AR(2) and AR(3) models and compare their information criteria.
arma_mod20 = ARIMA(dta, order=(2, 0, 0)).fit()
print(arma_mod20.params)
arma_mod30 = ARIMA(dta, order=(3, 0, 0)).fit()
print(arma_mod20.aic, arma_mod20.bic, arma_mod20.hqic)
print(arma_mod30.params)
print(arma_mod30.aic, arma_mod30.bic, arma_mod30.hqic)
# Residual diagnostics for the AR(3) fit: Durbin-Watson, normality, Q-Q plot.
sm.stats.durbin_watson(arma_mod30.resid.values)
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax = arma_mod30.resid.plot(ax=ax);
resid = arma_mod30.resid
stats.normaltest(resid)
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
fig = qqplot(resid, line='q', ax=ax, fit=True)
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(resid.values.squeeze(), lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(resid, lags=40, ax=ax2)
# Table of residual autocorrelations with Ljung-Box Q statistics and p-values.
r,q,p = sm.tsa.acf(resid.values.squeeze(), fft=True, qstat=True)
data = np.c_[range(1,41), r[1:], q, p]
table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
print(table.set_index('lag'))
# Dynamic out-of-sample prediction for 1990-2012.
predict_sunspots = arma_mod30.predict('1990', '2012', dynamic=True)
print(predict_sunspots)
def mean_forecast_err(y, yhat):
    """Mean forecast error: the average of the (aligned) differences y - yhat."""
    return (y - yhat).mean()
from statsmodels.tsa.arima_process import ArmaProcess
np.random.seed(1234)
# include zero-th lag
arparams = np.array([1, .75, -.65, -.55, .9])
maparams = np.array([1, .65])
arma_t = ArmaProcess(arparams, maparams)
arma_t.isinvertible
arma_t.isstationary
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(arma_t.generate_sample(nsample=50));
# A stationary parameterisation of the AR polynomial (previous one was not).
arparams = np.array([1, .35, -.15, .55, .1])
maparams = np.array([1, .65])
arma_t = ArmaProcess(arparams, maparams)
arma_t.isstationary
# Simulate 500 observations (after a 250-sample burn-in) and inspect ACF/PACF.
arma_rvs = arma_t.generate_sample(nsample=500, burnin=250, scale=2.5)
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(arma_rvs, lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(arma_rvs, lags=40, ax=ax2)
# Fit ARMA(1,1) and ARMA(4,1) to the simulated data; compare residual ACF tables.
arma11 = ARIMA(arma_rvs, order=(1, 0, 1)).fit()
resid = arma11.resid
r,q,p = sm.tsa.acf(resid, fft=True, qstat=True)
data = np.c_[range(1,41), r[1:], q, p]
table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
print(table.set_index('lag'))
arma41 = ARIMA(arma_rvs, order=(4, 0, 1)).fit()
resid = arma41.resid
r,q,p = sm.tsa.acf(resid, fft=True, qstat=True)
data = np.c_[range(1,41), r[1:], q, p]
table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
print(table.set_index('lag'))
# US CPI from the macro dataset; plot the level and run an ADF unit-root test.
macrodta = sm.datasets.macrodata.load_pandas().data
macrodta.index = pd.Index(sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3'))
cpi = macrodta["cpi"]
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax = cpi.plot(ax=ax);
ax.legend();
print(sm.tsa.adfuller(cpi)[1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sunspots Data
Step2: Does our model obey the theory?
Step3: This indicates a lack of fit.
Step4: Exercise
Step5: Let's make sure this model is estimable.
Step6: What does this mean?
Step7: For mixed ARMA processes the Autocorrelation function is a mixture of exponentials and damped sine waves after (q-p) lags.
Step8: Exercise
Step9: Hint
Step10: The p-value of the unit-root test resoundingly rejects the null hypothesis of a unit root.
|
2,536
|
<ASSISTANT_TASK:>
Python Code:
import naminggamesal.ngpop as ngpop
# Configuration for a 5-agent naming-game population: a 5x10 meaning/word
# matrix vocabulary, naive strategy with the BLIS_epirob vocabulary-update
# rule, and speaker-chooses interactions.
pop_cfg = {
    'voc_cfg': {
        'voc_type': 'matrix',
        'M': 5,
        'W': 10
    },
    'strat_cfg': {
        'strat_type': 'naive',
        'vu_cfg': {'vu_type': 'BLIS_epirob'}
    },
    'interact_cfg': {
        'interact_type': 'speakerschoice'
    },
    'nbagent': 5
}
testpop = ngpop.Population(**pop_cfg)
testpop
print(testpop)
print
testpop.visual(vtype="agents")
# Run 100 interactions, then inspect the population and agent states again.
testpop.play_game(100)
print(testpop)
testpop.visual()
testpop.visual(vtype="agents")
# Direct access to one agent's vocabulary matrix, plus a manual association edit.
testpop._agentlist[0]._vocabulary._content
testpop._agentlist[0]._vocabulary.add(0, 0, 0.5)
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's create a population. Agent creation is here dealt with automatically. Still, it is possible to manually add or remove agents (Hence the IDs of the agents), what will be seen later.
|
2,537
|
<ASSISTANT_TASK:>
Python Code:
# Import the libraries used throughout the notebook.
import time
import pandas as pd
import numpy as np
import collections
import itertools
import os
import warnings
warnings.filterwarnings('ignore')
# NOTE(review): sklearn.cross_validation is the pre-0.20 module name;
# modern scikit-learn uses sklearn.model_selection. Import is unused here.
from sklearn.cross_validation import train_test_split
# Load the cleaned + stemmed train/validation splits produced by the previous notebook.
data_valid_clean_stem = pd.read_csv("data/cdiscount_valid_clean_stem.csv").fillna("")
data_train_clean_stem = pd.read_csv("data/cdiscount_train_clean_stem.csv").fillna("")
DATA_OUTPUT_DIR = "data/features"
if not(os.path.isdir("data/features")):
    os.mkdir("data/features")
# Work only on the product Description column for these examples.
train_array = data_train_clean_stem["Description"].values
valid_array = data_valid_clean_stem["Description"].values
train_array[0]
# --- One-hot / count encoding of the vocabulary.
from sklearn.feature_extraction.text import CountVectorizer
extr_cv = CountVectorizer(binary=False)
data_train_OHE = extr_cv.fit_transform(train_array)
vocabulary = extr_cv.get_feature_names()
N_vocabulary = len(vocabulary)
print("Nombre de mots : %d" %N_vocabulary )
# %load solution/2_1.py
data_valid_OHE = extr_cv.transform(valid_array)
data_valid_OHE
# --- TF-IDF weighting over unigrams (unnormalised).
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer(ngram_range=(1,1), norm=False)
data_train_TFIDF = vec.fit_transform(train_array)
vocabulary = vec.get_feature_names()
N_vocabulary = len(vocabulary)
N_vocabulary
# %load solution/2_2.py
data_valid_TFIDF = vec.transform(valid_array)
data_valid_TFIDF
# %load solution/2_2bis.py
# --- Feature hashing: token-count dicts hashed into 300 buckets.
train_dict_array = list(map(lambda x : collections.Counter(x.split(" ")), train_array))
train_dict_array[0]
from sklearn.feature_extraction import FeatureHasher
nb_hash = 300
feathash = FeatureHasher(nb_hash)
data_train_hash = feathash.fit_transform(train_dict_array)
# Inspect the hashed representation of the first description.
ir = 0
rw = data_train_hash.getrow(ir)
print("Liste des tokens racinisé de la première ligne : " + train_array[0])
pd.DataFrame([(v, k) for k,v in zip(rw.data,rw.indices)], columns=["indices","weight"])
# --- TF-IDF re-weighting on top of the hashed counts.
from sklearn.feature_extraction.text import TfidfTransformer
vec = TfidfTransformer(norm=False)
data_train_HTfidf = vec.fit_transform(data_train_hash)
data_train_HTfidf
# Decompose each weight of the first row into its idf and tf factors.
ir = 0
rw = data_train_HTfidf.getrow(ir)
print(train_array[ir])
pd.DataFrame([(ind, vec.idf_[ind], w/vec.idf_[ind], w) for w,ind in zip(rw.data, rw.indices)], columns=["indices","idf","tf","weight"])
def vectorizer_train(df, columns=['Description'], nb_hash=None, nb_gram=1, vectorizer="tfidf", binary=False):
    """Fit a text-vectorisation pipeline on the given DataFrame columns.

    The selected columns of each row are joined into one text. When *nb_hash*
    is None the texts are vectorised directly (TfidfVectorizer or
    CountVectorizer depending on *vectorizer*); otherwise token counts are
    first hashed into *nb_hash* buckets, optionally re-weighted by TF-IDF.

    Returns (vec, feathash, data_vec): the fitted weighting transformer (or
    None), the fitted FeatureHasher (or None), and the transformed matrix.
    """
    texts = [" ".join(row) for row in df[columns].values]
    if nb_hash is None:
        feathash = None
        if vectorizer == "tfidf":
            vec = TfidfVectorizer(ngram_range=(1, nb_gram))
        else:
            vec = CountVectorizer(binary=binary)
        data_vec = vec.fit_transform(texts)
    else:
        token_counts = [collections.Counter(text.split(" ")) for text in texts]
        feathash = FeatureHasher(nb_hash)
        hashed = feathash.fit_transform(token_counts)
        if vectorizer == "tfidf":
            vec = TfidfTransformer()
            data_vec = vec.fit_transform(hashed)
        else:
            vec = None
            data_vec = hashed
    return vec, feathash, data_vec
def apply_vectorizer(df, vec, feathash, columns=['Description', 'Libelle', 'Marque']):
    """Apply a previously fitted hasher/weighting pair to new DataFrame rows.

    The selected columns of each row are joined into one text. A None
    *feathash* (resp. *vec*) skips the hashing (resp. weighting) stage, so
    passing None for both simply returns the joined texts.
    """
    texts = [" ".join(row) for row in df[columns].values]
    if feathash is not None:
        token_counts = [collections.Counter(text.split(" ")) for text in texts]
        hashed = feathash.transform(token_counts)
    else:
        hashed = texts
    return hashed if vec is None else vec.transform(hashed)
# (nb_hash, vectorizer) combinations to benchmark and persist.
parameters = [[None, "count"],
              [300, "count"],
              [None, "tfidf"],
              [300, "tfidf"]]
from scipy import sparse
# Fit each variant on train, apply it to valid, time it, and save both matrices.
for nb_hash, vectorizer in parameters:
    ts = time.time()
    vec, feathash, data_train_vec = vectorizer_train(data_train_clean_stem, nb_hash=nb_hash, vectorizer=vectorizer)
    data_valid_vec = apply_vectorizer(data_valid_clean_stem, vec, feathash)
    te = time.time()
    print("nb_hash : " + str(nb_hash) + ", vectorizer : " + str(vectorizer))
    print("Runing time for vectorization : %.1f seconds" %(te-ts))
    print("Train shape : " + str(data_train_vec.shape))
    print("Valid shape : " + str(data_valid_vec.shape))
    sparse.save_npz(DATA_OUTPUT_DIR +"/vec_train_nb_hash_" + str(nb_hash) + "_vectorizer_" + str(vectorizer), data_train_vec)
    sparse.save_npz(DATA_OUTPUT_DIR +"/vec_valid_nb_hash_" + str(nb_hash) + "_vectorizer_" + str(vectorizer), data_valid_vec)
import gensim
# Tokenise the descriptions (whitespace split) for Word2Vec training.
train_array_token = [line.split(" ") for line in train_array]
valid_array_token = [line.split(" ") for line in valid_array]
train_array_token[0]
# Word2Vec hyper-parameters: 300-dim vectors, negative sampling (hs=0, 10 negatives).
Features_dimension = 300
hs = 0
negative = 10
# Skip-gram model (sg=1).
sg = 1
print("Start learning skip-gram Word2Vec")
ts = time.time()
model_sg = gensim.models.Word2Vec(train_array_token, sg=sg, hs=hs, negative=negative, min_count=1, size=Features_dimension)
te = time.time()
t_learning = te-ts
print("Learning time : %.2f Word2Vec" %t_learning)
# CBOW model (sg=0).
sg = 0
print("Start learning CBOW Word2Vec")
ts = time.time()
model_cbow = gensim.models.Word2Vec(train_array_token, sg=sg, hs=hs, negative=negative, min_count=1, size=Features_dimension)
te = time.time()
t_learning = te-ts
print("Learning time : %.2f Word2Vec" %t_learning)
# Pre-trained ("online") French Word2Vec model, downloaded beforehand.
model_online_dir = "data/fr/fr.bin"
#model_online_dir = "ACOMPLETER/fr.bin"
model_online = gensim.models.Word2Vec.load(model_online_dir)
import nltk
stemmer = nltk.stem.SnowballStemmer('french')
# %load solution/2_3.py
# %load solution/2_4.py
# %load solution/2_5.py
def get_features_mean(lines, model, f_size):
    """Average the word vectors of the tokens in *lines* found in *model*.

    Tokens missing from the model's vocabulary are skipped. When no token is
    known at all, a vector of ones of length *f_size* is returned (sentinel
    value kept from the original implementation).
    """
    vectors = [model[token] for token in lines if token in model]
    if not vectors:
        return np.ones(f_size)
    return np.mean(vectors, axis=0)
def get_matrix_features_means(X, model, f_size):
    """Stack one averaged-embedding row per token list in *X* into a 2-D array."""
    rows = [get_features_mean(tokens, model, f_size) for tokens in X]
    return np.vstack(rows)
# Build and save the averaged-embedding matrix of the TRAIN set, CBOW model.
ts = time.time()
X_embedded_train_cbow = get_matrix_features_means(train_array_token, model_cbow, Features_dimension)
te = time.time()
t_build = te-ts
#np.save(embedded_train_dir, X_embedded_train)
print("Time conversion : %d seconds"%t_build)
print("Shape Matrix : (%d,%d)"%X_embedded_train_cbow.shape)
np.save(DATA_OUTPUT_DIR +"/embedded_train_cbow", X_embedded_train_cbow)
# Same for the VALIDATION set, CBOW model.
ts = time.time()
X_embedded_valid_cbow = get_matrix_features_means(valid_array_token, model_cbow, Features_dimension)
te = time.time()
t_build = te-ts
#np.save(embedded_train_dir, X_embedded_train)
print("Time conversion : %d seconds"%t_build)
print("Shape Matrix : (%d,%d)"%X_embedded_valid_cbow.shape)
np.save(DATA_OUTPUT_DIR +"/embedded_valid_cbow", X_embedded_valid_cbow)
# TRAIN set, skip-gram model.
ts = time.time()
X_embedded_train_sg = get_matrix_features_means(train_array_token, model_sg, Features_dimension)
te = time.time()
t_build = te-ts
#np.save(embedded_train_dir, X_embedded_train)
print("Time conversion : %d seconds"%t_build)
print("Shape Matrix : (%d,%d)"%X_embedded_train_sg.shape)
np.save(DATA_OUTPUT_DIR +"/embedded_train_sg", X_embedded_train_sg)
# VALIDATION set, skip-gram model.
ts = time.time()
X_embedded_valid_sg = get_matrix_features_means(valid_array_token, model_sg, Features_dimension)
te = time.time()
t_build = te-ts
#np.save(embedded_train_dir, X_embedded_train)
print("Time conversion : %d seconds"%t_build)
print("Shape Matrix : (%d,%d)"%X_embedded_valid_sg.shape)
np.save(DATA_OUTPUT_DIR +"/embedded_valid_sg", X_embedded_valid_sg)
# Reload the NON-stemmed cleaned data: the pre-trained online model was
# trained on full words, so stemmed tokens would miss its vocabulary.
data_valid_clean = pd.read_csv("data/cdiscount_valid_clean.csv").fillna("")
data_train_clean = pd.read_csv("data/cdiscount_train_clean.csv").fillna("")
train_array_token_nostem = [line.split(" ") for line in data_train_clean["Description"].values]
valid_array_token_nostem = [line.split(" ") for line in data_valid_clean["Description"].values]
# TRAIN set, pre-trained online model.
ts = time.time()
X_embedded_train_online = get_matrix_features_means(train_array_token_nostem, model_online, Features_dimension)
te = time.time()
t_build = te-ts
#np.save(embedded_train_dir, X_embedded_train)
print("Time conversion : %d seconds"%t_build)
print("Shape Matrix : (%d,%d)"%X_embedded_train_online.shape)
np.save(DATA_OUTPUT_DIR +"/embedded_train_online", X_embedded_train_online)
# VALIDATION set, pre-trained online model.
ts = time.time()
X_embedded_valid_online = get_matrix_features_means(valid_array_token_nostem, model_online, Features_dimension)
te = time.time()
t_build = te-ts
#np.save(embedded_train_dir, X_embedded_train)
print("Time conversion : %d seconds"%t_build)
print("Shape Matrix : (%d,%d)"%X_embedded_valid_online.shape)
np.save(DATA_OUTPUT_DIR +"/embedded_valid_online", X_embedded_valid_online)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Téléchargement des données
Step2: On créé un dossier dans lequel nous allons sauvegarder les DataFrame constitués des features que l'on va construire dans ce notebook
Step3: Dans un premier temps, en guise d'exemple et pour réduire le temps de calcul, on ne considère que la colonne Description de nos DataFrame générés dans le calepin précédent.
Step4: Vectorisaton
Step5: One-Hot-Encoding
Step6: Q A quoi sert l'argument binary de la classe?
Step7: Exercice Pour la première ligne de votre dataset train. retrouvez l'ensemble des mots constituant cette ligne à partir de l'objet data_train_OHEet de vocabulary ainsi que le nombre d'occurence de chacun de ces mots dans la ligne.
Step8: La même transformation est appliqué sur l'échantillon de validation.
Step9: Q Que se passe-til pour les mots présents dans le dataset de validation mais qui ne sont pas présent dans le dataset d'apprentissage?
Step10: Q A quoi sert l'argument ngram_range?
Step11: Exercice Pour la première ligne de votre dataset train. retrouvez l'ensemble des mots constituant cette ligne à partir de l'objet data_train_TFIDFet de vocabulary ainsi que la valeur de l'idf, du tf et du poids tfidf de chacun de ces mots dans la ligne
Step12: Q Commentez les valeurs de l'idf pour chacun des mots.
Step13: ATENTION Si le tf est recalculé pour chaque ligne, le même idf est utilisé
Step14: Hashing
Step15: Constatez que data_train_hash est stocké sous le même format que data_train_OHE ou data_train_TFIDF.
Step16: Q Que pouvez-vous dire des poids?
Step17: Build and Save Vectorize Vector
Step18: Word2Vec
Step19: Build Word2Vec model
Step20: Cette fonction contient un grand nombre d' arguments. Le but de ce TP n'est pas d'optimiser les paramètres de ce modèle mais de les comprendre. Nous allons donc fixer quelques arguments par défault
Step21: Nous allons créer deux modèles
Step22: Q Que dire du temps d'apprentissage de ces deux modèles? D'ou vient cette différence?
Step23: Propriété du modèle
Step24: Most similar world
Step25: Q Comparez la qualité de prévision des modèles que nous avons entrainés sur le jeu de données 'Cdiscount' avec celui appris online. Que pouvez-vous en dire?
Step26: Exercice Testez d'autres combinaisons si vous le souhaitez.
Step27: Build Features
Step28: Cbow
Step29: Skip-Gram
Step30: Online model
|
2,538
|
<ASSISTANT_TASK:>
Python Code:
# Import the simulation function
from pymer4.simulate import simulate_lm
# Also fix the random number generator for reproducibility
import numpy as np
np.random.seed(10)
# Simulate 500 obs with 3 predictors: coef_vals = true intercept + slopes,
# mus = predictor means, noise_params = (mean, sd) of the error term.
# NOTE: the sequence of random draws is seed-dependent -- do not reorder.
data, b = simulate_lm(
    500, 3, coef_vals=[100, 1.2, -40.1, 3], mus=[10, 30, 1], noise_params=(0, 5)
)
print(f"True coefficients:\n{b}\n")
print(f"Data:\n{data.head()}")
# Sanity checks on the predictor columns (iloc[:, 1:] -- presumably skips
# the DV column; confirm against data.head()).
print(data.iloc[:, 1:].mean(axis=0))
print(data.iloc[:, 1:].corr())
from pymer4.models import Lm
# Refit OLS; recovered estimates should be close to the true coefficients.
model = Lm("DV ~ IV1+IV2+IV3", data=data)
model.fit(summarize=False)
print(model.coefs.loc[:, "Estimate"])
# Fully-default simulation: 100 obs, 5 standard-normal predictors.
data, b = simulate_lm(100, 5)
print(f"True coefficients:\n{b}\n")
print(f"Data:\n{data.head()}")
from pymer4.simulate import simulate_lmm
# Multi-level simulation: 50 obs/group, 3 predictors, 100 groups,
# with predictor correlations fixed at 0.15.
num_obs = 50
num_coef = 3
num_grps = 100
mus = [10.0, 30.0, 2.0]
coef_vals = [4.0, 1.8, -2, 10]
corrs = 0.15
data, blups, b = simulate_lmm(
    num_obs, num_coef, num_grps, coef_vals=coef_vals, mus=mus, corrs=corrs
)
print(f"True coefficients:\n{b}\n")
print(f"BLUPs:\n{blups.head()}\n")
print(f"Data:\n{data.head()}\n")
# Group the data before running checks
group_data = data.groupby("Group")
# Per-group means and correlations (iloc[:, 1:-1] -- presumably the
# predictor columns between DV and Group; verify with data.head()).
print(group_data.apply(lambda grp: grp.iloc[:, 1:-1].mean(axis=0)))
print(group_data.apply(lambda grp: grp.iloc[:, 1:-1].corr()))
from pymer4.models import Lmer
# Random-intercept mixed model; check fixed-effect recovery.
model = Lmer("DV ~ IV1+IV2+IV3 + (1|Group)", data=data)
model.fit(summarize=False)
print(model.coefs.loc[:, "Estimate"])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here are some checks you might do to make sure the data were correctly generated
Step2: Check correlations between predictors
Step3: Check coefficient recovery
Step4: You have the option of being as general or specific as you like when generating data. Here's a simpler example that generates 100 observations with 5 predictors from a standard normal distribution, i.e. mean = 0, sd = 1 with random correlations between predictors.
Step5: Generating multi-level regression data
Step6: Again here are some checks you might do to make sure the data were correctly generated (by default lmm data will generally be a bit noisier due to within and across group/cluster variance; see the API for how to customize this)
Step7: Check mean of predictors within each group
Step8: Check correlations between predictors within each group
Step9: Check coefficient recovery
|
2,539
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Install TFX (pinned to the 1.x line) into the current notebook kernel.
import sys
!{sys.executable} -m pip install --upgrade "tfx<2"
# Set `PATH` to include user python binary directory.
HOME=%env HOME
PATH=%env PATH
%env PATH={PATH}:{HOME}/.local/bin
# Confirm the installed TFX version is importable.
!python3 -c "from tfx import version ; print('TFX version: {}'.format(version.__version__))"
PIPELINE_NAME="my_pipeline"
import os
# Create a project directory under Colab content directory.
PROJECT_DIR=os.path.join(os.sep,"content",PIPELINE_NAME)
# Copy the predefined "taxi" pipeline template into the project directory.
!tfx template copy \
  --pipeline_name={PIPELINE_NAME} \
  --destination_path={PROJECT_DIR} \
  --model=taxi
%cd {PROJECT_DIR}
# Run the template's unit tests for the feature and Keras model code.
!{sys.executable} -m models.features_test
!{sys.executable} -m models.keras.model_test
# Register the pipeline with the local orchestrator and start a run.
!tfx pipeline create --engine=local --pipeline_path=local_runner.py
!tfx run create --engine=local --pipeline_name={PIPELINE_NAME}
# Update the pipeline
!tfx pipeline update --engine=local --pipeline_path=local_runner.py
# You can run the pipeline the same way.
!tfx run create --engine local --pipeline_name {PIPELINE_NAME}
# Re-run after each edit to pipeline/pipeline.py: update, then create a run.
!tfx pipeline update --engine=local --pipeline_path=local_runner.py
!tfx run create --engine local --pipeline_name {PIPELINE_NAME}
# Authenticate when running inside Colab (required for GCP/BigQuery access).
if 'google.colab' in sys.modules:
    from google.colab import auth
    auth.authenticate_user()
    print('Authenticated')
# Set your project name below.
# WARNING! ENTER your project name before running this cell.
%env GOOGLE_CLOUD_PROJECT=YOUR_PROJECT_NAME_HERE
!tfx pipeline update --engine=local --pipeline_path=local_runner.py
!tfx run create --engine local --pipeline_name {PIPELINE_NAME}
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a TFX pipeline using templates with Local orchestrator
Step2: NOTE
Step3: Let's check the version of TFX.
Step4: And, it's done. We are ready to create a pipeline.
Step 2. Copy predefined template to your project directory.
Step5: TFX includes the taxi template with the TFX python package. If you are planning to solve a point-wise prediction problem, including classification and regression, this template could be used as a starting point.
Step6: Change the working directory context in this notebook to the project directory.
Step7: Step 3. Browse your copied source files.
Step8: Step 4. Run your first TFX pipeline
Step9: Then, you can run the created pipeline using run create command.
Step10: If successful, you'll see Component CsvExampleGen is finished. When you copy the template, only one component, CsvExampleGen, is included in the pipeline.
Step 5. Add components for data validation.
Step11: You should be able to see the output log from the added components. Our pipeline creates output artifacts in tfx_pipeline_output/my_pipeline directory.
Step 6. Add components for training.
Step12: When this execution run finishes successfully, you have now created and run your first TFX pipeline using Local orchestrator!
Step13: You should specify your GCP project name to access BigQuery resources using TFX. Set GOOGLE_CLOUD_PROJECT environment variable to your project name.
Step14: Open pipeline/pipeline.py. Comment out CsvExampleGen and uncomment the line which create an instance of BigQueryExampleGen. You also need to uncomment query argument of the create_pipeline function.
|
2,540
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['figure.figsize'] = (20.0, 10.0)
plt.rcParams['font.family'] = "serif"
df = pd.read_csv('../../datasets/movie_metadata.csv')
df.head()
# split each movie's genre list, then form a set from the unwrapped list of all genres
categories = set([s for genre_list in df.genres.unique() for s in genre_list.split("|")])
# one-hot encode each movie's classification
for cat in categories:
    df[cat] = df.genres.transform(lambda s: int(cat in s))
# drop other columns
df = df[['director_name','genres','duration'] + list(categories)]
df.head()
# convert from wide to long format and remove null classifications
df = pd.melt(df,
             id_vars=['duration'],
             value_vars = list(categories),
             var_name = 'Category',
             value_name = 'Count')
df = df.loc[df.Count>0]
# Rank genres by total count (a movie contributes once per genre it has).
top_categories = df.groupby('Category').aggregate(sum).sort_values('Count', ascending=False).index
howmany=10
# add an indicator whether a movie is short or long, split at 100 minutes runtime
df['islong'] = df.duration.transform(lambda x: int(x > 100))
# Keep only the `howmany` most common genres for plotting.
df = df.loc[df.Category.isin(top_categories[:howmany])]
# sort in descending order
#df = df.loc[df.groupby('Category').transform(sum).sort_values('Count', ascending=False).index]
df.head()
# Basic vertical count plot: number of movies per genre.
p = sns.countplot(data=df, x = 'Category')
# Split each bar by the short/long indicator via hue=.
p = sns.countplot(data=df,
                  x = 'Category',
                  hue = 'islong')
# Horizontal orientation: pass the category to y= instead of x=.
p = sns.countplot(data=df,
                  y = 'Category',
                  hue = 'islong')
# saturation=1 renders palette colours at full intensity.
p = sns.countplot(data=df,
                  y = 'Category',
                  hue = 'islong',
                  saturation=1)
import matplotlib.pyplot as plt
# Target a specific axes of a multi-axes figure with ax=.
fig, ax = plt.subplots(2)
sns.countplot(data=df,
              y = 'Category',
              hue = 'islong',
              saturation=1,
              ax=ax[1])
import numpy as np
# Extra keyword args are forwarded to matplotlib's bar(); here xerr adds
# (artificial, growing) error bars purely for demonstration.
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
                  y = 'Category',
                  hue = 'islong',
                  saturation=1,
                  xerr=7*np.arange(num_categories))
import numpy as np
# edgecolor/linewidth add black outlines around the bars.
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
                  y = 'Category',
                  hue = 'islong',
                  saturation=1,
                  xerr=7*np.arange(num_categories),
                  edgecolor=(0,0,0),
                  linewidth=2)
import numpy as np
# fill=False removes the colour fill, leaving outlines only.
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
                  y = 'Category',
                  hue = 'islong',
                  saturation=1,
                  xerr=7*np.arange(num_categories),
                  edgecolor=(0,0,0),
                  linewidth=2,
                  fill=False)
import numpy as np
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
                  y = 'Category',
                  hue = 'islong',
                  saturation=1,
                  xerr=7*np.arange(num_categories),
                  edgecolor=(0,0,0),
                  linewidth=2)
# sns.set() restyles all subsequent figures (here: larger fonts).
sns.set(font_scale=1.25)
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
                  y = 'Category',
                  hue = 'islong',
                  saturation=1,
                  xerr=3*np.arange(num_categories),
                  edgecolor=(0,0,0),
                  linewidth=2)
help(sns.set)
# Font experiments via matplotlib rcParams.
plt.rcParams['font.family'] = "cursive"
#sns.set(style="white",font_scale=1.25)
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
                  y = 'Category',
                  hue = 'islong',
                  saturation=1,
                  xerr=3*np.arange(num_categories),
                  edgecolor=(0,0,0),
                  linewidth=2)
plt.rcParams['font.family'] = 'Times New Roman'
#sns.set_style({'font.family': 'Helvetica'})
sns.set(style="white",font_scale=1.25)
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
                  y = 'Category',
                  hue = 'islong',
                  saturation=1,
                  xerr=3*np.arange(num_categories),
                  edgecolor=(0,0,0),
                  linewidth=2)
# Dark theme: grey figure/axes background set through seaborn's rc dict.
bg_color = (0.25, 0.25, 0.25)
sns.set(rc={"font.style":"normal",
            "axes.facecolor":bg_color,
            "figure.facecolor":bg_color,
            "text.color":"black",
            "xtick.color":"black",
            "ytick.color":"black",
            "axes.labelcolor":"black"})
#sns.set_style({'font.family': 'Helvetica'})
#sns.set(style="white",font_scale=1.25)
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
                  y = 'Category',
                  hue = 'islong',
                  saturation=1,
                  xerr=3*np.arange(num_categories),
                  edgecolor=(0,0,0),
                  linewidth=2,
                  palette="Dark2")
# Post-hoc legend/label customisation through the matplotlib objects.
leg = p.get_legend()
leg.set_title("Duration")
labs = leg.texts
labs[0].set_text("Short")
labs[1].set_text("Long")
leg.get_title().set_color('white')
for lab in labs:
    lab.set_color('white')
p.axes.xaxis.label.set_text("Counts")
# Oversized annotation, then save the final figure to disk.
plt.text(900,0, "Count Plot", fontsize = 95, color='white', fontstyle='italic')
p.get_figure().savefig('../figures/countplot.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For the bar plot, let's look at the number of movies in each category, allowing each movie to be counted more than once.
Step2: Basic plot
Step3: color by a category
Step4: make plot horizontal
Step5: Saturation
Step6: Targeting a non-default axes
Step7: Add error bars
Step8: add black bounding lines
Step9: Remove color fill
|
2,541
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('bmh')
%matplotlib inline
# First attempt: calling plot() with label= inside the loop registers a
# legend entry on EVERY iteration, so each label appears 10 times.
plt.figure(figsize = (12, 6))
for i in range(10):
    x = np.arange(i * 10, i * 10 + 10)
    y_var1 = np.random.randint(1, 5, 10)
    y_var2 = np.random.randint(5, 8, 10)
    plt.plot(x, y_var1, color = 'k', label = 'variable1')
    plt.plot(x, y_var2, color = 'g', label = 'variable2')
plt.legend()
plt.ylim(0, 9)
# Fix: only label the first iteration; matplotlib ignores legend labels
# that start with an underscore.
plt.figure(figsize = (12, 6))
for i in range(10):
    x = np.arange(i * 10, i * 10 + 10)
    y_var1 = np.random.randint(1, 5, 10)
    y_var2 = np.random.randint(5, 8, 10)
    plt.plot(x, y_var1, color = 'k', label = 'variable1' if i == 0 else "_esto_no_se_pintará")
    plt.plot(x, y_var2, color = 'g', label = 'variable2' if i == 0 else "_esto_tampoco")
plt.legend()
plt.ylim(0, 9)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Como véis, en la gráfica anterior hay varios problemas pero como esta es una MicroEntrada solo nos vamos a centrar en el problema de las etiquetas repetidas en la leyenda.
|
2,542
|
<ASSISTANT_TASK:>
Python Code:
%load_ext cypher
%%cypher
// Tag every type annotated with @javax.persistence.Entity with the
// :Entity label so later queries can match entities directly.
MATCH
(t:Type)-[:ANNOTATED_BY]->()-[:OF_TYPE]->(a:Type)
WHERE
a.fqn="javax.persistence.Entity"
SET
t:Entity
RETURN
t.fqn AS Entity
%%cypher
// Concern: entities that correctly live in a package named "model".
MATCH (e:Entity)<-[:CONTAINS]-(p:Package)
WHERE p.name = "model"
RETURN e.fqn as Entity, p.name as Package
%%cypher
// Rule violation: any entity outside a "model" package is misplaced.
MATCH (e:Entity)<-[:CONTAINS]-(p:Package)
WHERE p.name <> "model"
RETURN e.fqn as MisplacedEntity, p.name as WrongPackage
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define Concerns
Step2: Rule Definition
Step3: Rule Violations
|
2,543
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('..')
import os
import json
from time import time
import numpy as np
from tqdm import tqdm
import theano
import theano.tensor as T
from theano.sandbox.cuda.dnn import dnn_conv
from PIL import Image
from lib import activations
from lib import updates
from lib import inits
from lib.rng import py_rng, np_rng
from lib.ops import batchnorm, conv_cond_concat, deconv, dropout, l2normalize
from lib.metrics import nnc_score, nnd_score
from lib.theano_utils import floatX, sharedX
from lib.data_utils import OneHot, shuffle, iter_data, center_crop, patch
from fuel.datasets.hdf5 import H5PYDataset
from fuel.schemes import ShuffledScheme, SequentialScheme
from fuel.streams import DataStream
import h5py
# Open the HDF5 file only once: if `hf` is already a live handle the probe
# succeeds; any failure (hf undefined, handle closed, ...) falls through to
# (re)opening it. NOTE(review): bare except is a notebook-reexecution hack
# and will also hide real errors.
try:
    hf["target"].shape
except:
    hf = h5py.File('faces.hdf5','r+')
num_samples = hf["input"].shape[0]
print "number of samples in dataset : %i" %num_samples
# Attach a Fuel-style split attribute to the file:
# rows [0, 1000) -> test, [1000, 2000) -> val, [2000, end) -> train.
split_dict = {
    'train': {'input': (2000, num_samples), 'target': (2000, num_samples)},
    'test': {'input': (0, 1000), 'target': (0, 1000)},
    'val': {'input': (1000, 2000), 'target': (1000, 2000)}
}
hf.attrs['split'] = H5PYDataset.create_split_array(split_dict)
train_set = H5PYDataset('faces.hdf5', which_sets=('train',))
test_set = H5PYDataset('faces.hdf5', which_sets=('test',))
val_set = H5PYDataset('faces.hdf5', which_sets=('val',))
#batch_size = 128
batch_size = 12
#TODO : use shuffledscheme instead? Seems slower, might have screwed up the chunksize in the HDF5 files?
tr_scheme = SequentialScheme(examples=train_set.num_examples, batch_size=batch_size)
tr_stream = DataStream(train_set, iteration_scheme=tr_scheme)
val_scheme = SequentialScheme(examples=val_set.num_examples, batch_size=batch_size)
val_stream = DataStream(val_set, iteration_scheme=val_scheme)
test_scheme = SequentialScheme(examples=test_set.num_examples, batch_size=batch_size)
test_stream = DataStream(test_set, iteration_scheme=test_scheme)
# Pull one batch just to eyeball an example input/target pair.
for x_train, x_target in tr_stream.get_epoch_iterator():
    break
print "EXAMPLE TARGET IMAGE:"
Image.fromarray(x_target[3].astype(np.uint8))
print "EXAMPLE INPUT IMAGE:"
Image.fromarray(x_train[3].astype(np.uint8))
def target_transform(X):
    """Cast NHWC images to float NCHW tensors rescaled to [-1, 1]."""
    rescaled = floatX(X).transpose(0, 3, 1, 2) / 127.5
    return rescaled - 1.
def input_transform(X):
    # Inputs currently receive the identical preprocessing as targets
    # (no noise/corruption is applied at this stage).
    return target_transform(X)
# Training hyper-parameters.
l2 = 1e-5 # l2 weight decay
nvis = 196 # # of samples to visualize during training
b1 = 0.5 # momentum term of adam
nc = 3 # # of channels in image
#nbatch = 128 # # of examples in batch
nbatch = 12 # # of examples in batch
npx = 64 # # of pixels width/height of images
nx = npx*npx*nc # # of dimensions in X
niter = 1000 # # of iter at starting learning rate
niter_decay = 30 # # of iter to linearly decay learning rate to zero
lr = 0.0002 # initial learning rate for adam
ntrain = 25000 # # of examples to train on
# Activation functions and the binary cross-entropy loss used below.
relu = activations.Rectify()
sigmoid = activations.Sigmoid()
lrelu = activations.LeakyRectify()
tanh = activations.Tanh()
bce = T.nnet.binary_crossentropy
def mse(x,y):
    """Per-row sum of squared differences (note: not divided by n despite the name)."""
    diff = x - y
    return T.sum(diff * diff, axis = 1)
# Weight initialisers: small-scale normals for conv weights, ones for
# batchnorm gains, zeros for biases.
gifn = inits.Normal(scale=0.02)
difn = inits.Normal(scale=0.02)
sigma_ifn = inits.Normal(loc = -100., scale=0.02)
gain_ifn = inits.Normal(loc=1., scale=0.02)
bias_ifn = inits.Constant(c=0.)
def make_conv_layer(X, input_size, output_size, input_filters,
                    output_filters, name, index,
                    weights = None, filter_sz = 5):
    """Build one conv (downsampling) or deconv (upsampling) layer + batchnorm.

    output_size >= input_size selects a deconv with relu; otherwise a strided
    conv with leaky relu. Returns (layer_output, [w, g, b]).
    """
    is_deconv = output_size >= input_size
    # Deconv weights are (in, out, h, w); conv weights are (out, in, h, w).
    w_size = (input_filters, output_filters, filter_sz, filter_sz) \
    if is_deconv else (output_filters, input_filters, filter_sz, filter_sz)
    if weights is None:
        # Fresh shared variables named e.g. "gw0"/"gg0"/"gb0" for set "g", layer 0.
        w = gifn(w_size, '%sw%i' %(name, index))
        g = gain_ifn((output_filters), '%sg%i' %(name, index))
        b = bias_ifn((output_filters), '%sb%i' %(name, index))
    else:
        w,g,b = weights
    conv_method = deconv if is_deconv else dnn_conv
    activation = relu if is_deconv else lrelu
    # Stride = size ratio between layers (Python 2 integer division).
    sub = output_size / input_size if is_deconv else input_size / output_size
    # Border mode 1 for 3x3 filters, 2 for 5x5, to keep spatial sizes aligned.
    if filter_sz == 3:
        bm = 1
    else:
        bm = 2
    layer = activation(batchnorm(conv_method(X, w, subsample=(sub, sub), border_mode=(bm, bm)), g=g, b=b))
    return layer, [w,g,b]
def make_conv_set(input, layer_sizes, num_filters, name, weights = None, filter_szs = None):
    """Stack a sequence of (de)conv layers built with make_conv_layer.

    layer_sizes / num_filters give the spatial size and filter count at each
    stage (length == n_layers + 1). `weights` optionally supplies three
    shared variables (w, g, b) per layer for reuse; `filter_szs` optionally
    overrides the default 5x5 filter per layer.

    Returns (final_output, flat_param_list, per_layer_outputs).
    """
    assert(len(layer_sizes) == len(num_filters))
    vars_ = []
    layers_ = []
    current_layer = input
    for i in range(len(layer_sizes) - 1):
        input_size = layer_sizes[i]
        output_size = layer_sizes[i + 1]
        input_filters = num_filters[i]
        output_filters = num_filters[i + 1]
        if weights is not None:
            # Each layer consumes three consecutive entries (w, g, b).
            this_wts = weights[i * 3 : i * 3 + 3]
        else:
            this_wts = None
        # Identity comparison with None (was `!= None`), consistent with
        # the `weights is not None` check above.
        if filter_szs is not None:
            filter_sz = filter_szs[i]
        else:
            filter_sz = 5
        layer, new_vars = make_conv_layer(current_layer, input_size, output_size,
                                          input_filters, output_filters, name, i,
                                          weights = this_wts, filter_sz = filter_sz)
        vars_ += new_vars
        layers_ += [layer]
        current_layer = layer
    return current_layer, vars_, layers_
import pickle
#Use code below if you want use a saved model
'''
[e_params, g_params, d_params] = pickle.load( open( "models/autoencoder_100epoch/faces_dcgan_denoising_64epoch_100encoding.pkl", "rb" ) )
gwx = g_params[-1]
dwy = d_params[-1]
# inputs
X = T.tensor4()
## encode layer
e_layer_sizes = [128, 64, 32, 16, 8]
e_filter_sizes = [3, 256, 256, 512, 1024]
eX, e_params, e_layers = make_conv_set(X, e_layer_sizes, e_filter_sizes, "e", weights=e_params)
## generative layer
g_layer_sizes = [8, 16, 32, 64, 128]
g_num_filters = [1024, 512, 256, 256, 128]
g_out, g_params, g_layers = make_conv_set(eX, g_layer_sizes, g_num_filters, "g", weights=g_params)
g_params += [gwx]
gX = tanh(deconv(g_out, gwx, subsample=(1, 1), border_mode=(2, 2)))
## discrim layer(s)
df1 = 128
d_layer_sizes = [128, 64, 32, 16, 8]
d_filter_sizes = [3, df1, 2 * df1, 4 * df1, 8 * df1]
def discrim(input, name, weights=None):
d_out, disc_params, d_layers = make_conv_set(input, d_layer_sizes, d_filter_sizes, name, weights = weights)
d_flat = T.flatten(d_out, 2)
disc_params += [dwy]
y = sigmoid(T.dot(d_flat, dwy))
return y, disc_params, d_layers
# target outputs
target = T.tensor4()
p_real, d_params, d_layers = discrim(target, "d", weights=d_params)
#we need to make sure the p_gen params are the same as the p_real params
p_gen , d_params2, d_layers = discrim(gX, "d", weights=d_params)
'''
#Use code below if you are training a model from scratch
# inputs
X = T.tensor4()
## encode layer
# Encoder: spatial sizes 128 -> 8 while filter counts grow 3 -> 1024.
e_layer_sizes = [128, 64, 32, 16, 8]
e_filter_sizes = [3, 256, 256, 512, 1024]
eX, e_params, e_layers = make_conv_set(X, e_layer_sizes, e_filter_sizes, "e")
## generative layer
# Generator/decoder mirrors the encoder back up to full resolution.
g_layer_sizes = [8, 16, 32, 64, 128]
g_num_filters = [1024, 512, 256, 256, 128]
g_out, g_params, g_layers = make_conv_set(eX, g_layer_sizes, g_num_filters, "g")
gwx = gifn((128, nc, 5, 5), 'gwx')
g_params += [gwx]
# Final deconv to `nc` channels; tanh maps pixel values into [-1, 1].
gX = tanh(deconv(g_out, gwx, subsample=(1, 1), border_mode=(2, 2)))
## discrim layer(s)
df1 = 128
d_layer_sizes = [128, 64, 32, 16, 8]
d_filter_sizes = [3, df1, 2 * df1, 4 * df1, 8 * df1]
dwy = difn((df1 * 8 * 10 * 8, 1), 'dwy')
def discrim(input, name, weights=None):
    """Discriminator: conv stack, flatten, single sigmoid output in [0, 1].

    Pass `weights` to share parameters with a previously built branch.
    """
    d_out, disc_params, d_layers = make_conv_set(input, d_layer_sizes, d_filter_sizes, name, weights = weights)
    d_flat = T.flatten(d_out, 2)
    disc_params += [dwy]
    y = sigmoid(T.dot(d_flat, dwy))
    return y, disc_params, d_layers
# target outputs
target = T.tensor4()
p_real, d_params, d_layers = discrim(target, "d")
#we need to make sure the p_gen params are the same as the p_real params
# BUG FIX: pass weights=d_params so the generated-image branch reuses the
# real branch's parameters (as the comment above requires, and as the
# disabled saved-model variant does); previously a second, never-trained
# discriminator was silently created here.
p_gen , d_params2, d_layers = discrim(gX, "d", weights=d_params)
# test everything working so far (errors are most likely size mismatches)
# Compile a forward pass through encoder -> generator -> discriminator.
f = theano.function([X], p_gen)
f(input_transform(x_train)).shape
from theano.tensor.signal.downsample import max_pool_2d
## GAN costs
# Discriminator targets: 1 for real images, 0 for generated ones;
# the generator is rewarded when the discriminator outputs 1 on fakes.
d_cost_real = bce(p_real, T.ones(p_real.shape)).mean()
d_cost_gen = bce(p_gen, T.zeros(p_gen.shape)).mean()
g_cost_d = bce(p_gen, T.ones(p_gen.shape)).mean()
## MSE encoding cost is done on an (averaged) downscaling of the image
target_pool = max_pool_2d(target, (4,4), mode="average_exc_pad",ignore_border=True)
target_flat = T.flatten(target_pool, 2)
gX_pool = max_pool_2d(gX, (4,4), mode="average_exc_pad",ignore_border=True)
gX_flat = T.flatten(gX_pool,2)
enc_cost = mse(gX_flat, target_flat).mean()
## MSE encoding without max pooling
'''
target_flat = T.flatten(target, 2)
gX_flat = T.flatten(gX,2)
enc_cost = mse(gX_flat, target_flat).mean()
'''
## generator cost is a linear combination of the discrim cost plus the MSE encoding cost
d_cost = d_cost_real + d_cost_gen
#To change the weight of MSE, change the denominator. ex. enc_cost/5 weights MSE much less that enc_cost/1
g_cost = g_cost_d + enc_cost / 1
## N.B. e_cost and e_updates will only try and minimise MSE loss on the autoencoder (for debugging)
e_cost = enc_cost
cost = [g_cost_d, d_cost_real, enc_cost]
# Adam optimisers: generator updates train encoder + generator jointly;
# e_updates is the encoder-only MSE debug path.
elrt = sharedX(0.002)
lrt = sharedX(lr)
d_updater = updates.Adam(lr=lrt, b1=b1, regularizer=updates.Regularizer(l2=l2))
g_updater = updates.Adam(lr=lrt, b1=b1, regularizer=updates.Regularizer(l2=l2))
e_updater = updates.Adam(lr=elrt, b1=b1, regularizer=updates.Regularizer(l2=l2))
d_updates = d_updater(d_params, d_cost)
g_updates = g_updater(e_params + g_params, g_cost)
e_updates = e_updater(e_params, e_cost)
print 'COMPILING'
t = time()
# Compiled training/eval functions: each returns `cost` and applies
# its own parameter updates (except _get_cost, which is read-only).
_train_g = theano.function([X, target], cost, updates=g_updates)
_train_d = theano.function([X, target], cost, updates=d_updates)
_train_e = theano.function([X, target], cost, updates=e_updates)
_get_cost = theano.function([X, target], cost)
print '%.2f seconds to compile theano functions'%(time()-t)
# Directory for sample image dumps produced during training.
img_dir = "gen_images/"
if not os.path.exists(img_dir):
    os.makedirs(img_dir)
# Compiled pass returning (reconstruction, target) for visualisation.
ae_encode = theano.function([X, target], [gX, target])
def inverse(X):
    """Map a float (batch, C, H, W) tensor in [-1, 1] back to uint8 HWC images."""
    # Channels-first -> channels-last, then undo the [-1, 1] scaling.
    scaled = 127.5 * (X.transpose(0, 2, 3, 1) + 1)
    pixels = np.clip(np.rint(scaled).astype(int), 0, 255)
    return pixels.astype('uint8')
def save_sample_pictures():
    """Save a 2x12 montage of (target | input | reconstruction) triples
    drawn from one test batch to gen_images/<timestamp>.png."""
    # Grab a single test batch.
    for te_train, te_target in test_stream.get_epoch_iterator():
        break
    te_out, te_ta = ae_encode(input_transform(te_train), target_transform(te_target))
    te_reshape = inverse(te_out)
    te_target_reshape = inverse(te_ta)
    new_size = (128 * 6, 160 * 12)
    new_im = Image.new('RGB', new_size)
    # Sample 24 (possibly repeated) indices from the batch of 12.
    r = np.random.choice(12, 24, replace=True).reshape(2,12)
    for i in range(2):
        for j in range(12):
            index = r[i][j]
            target_im = Image.fromarray(te_target_reshape[index])
            train_im = Image.fromarray(te_train[index].astype(np.uint8))
            im = Image.fromarray(te_reshape[index])
            # Column layout per triple: target, raw input, reconstruction.
            new_im.paste(target_im, (128 * i * 3, 160 * j))
            new_im.paste(train_im, (128 * (i * 3 + 1), 160 * j))
            new_im.paste(im, (128 * (i * 3 + 2), 160 * j))
    img_loc = "gen_images/%i.png" %int(time())
    print "saving images to %s" %img_loc
    new_im.save(img_loc)
#saves output for all testing images. This may take a couple of minutes to run.
def save_all_pictures():
counter = 0
for te_train, te_target in test_stream.get_epoch_iterator():
te_out, te_ta = ae_encode(input_transform(te_train), target_transform(te_target))
te_reshape = inverse(te_out)
te_target_reshape = inverse(te_ta)
new_size = (128 * 3, 160 * 12)
new_im = Image.new('RGB', new_size)
r = [range(12),range(12)]
for i in range(1):
for j in range(12):
index = r[i][j]
try:
target_im = Image.fromarray(te_target_reshape[index])
train_im = Image.fromarray(te_train[index].astype(np.uint8))
im = Image.fromarray(te_reshape[index])
new_im.paste(target_im, (128 * i * 3, 160 * j))
new_im.paste(train_im, (128 * (i * 3 + 1), 160 * j))
new_im.paste(im, (128 * (i * 3 + 2), 160 * j))
except:
print "Eror with training image"
img_loc = "gen_images/test_result_%i.png" %counter
print "saving images to %s" %img_loc
new_im.save(img_loc)
counter+=1
# Uncomment to dump reconstructions for the entire test set (slow).
#save_all_pictures()
save_sample_pictures()
def mn(l):
    """Return the arithmetic mean of `l`, or 0 for an empty list.

    Guarding on the empty list (rather than the original `sum(l) == 0`,
    which only existed to dodge ZeroDivisionError) gives the same result
    for all-zero cost lists while making the intent explicit.
    """
    if not l:
        return 0
    return sum(l) / len(l)
## TODO : nicer way of coding these means?
def get_test_errors():
print "getting test error"
g_costs = []
d_costs = []
e_costs = []
k_costs = []
for i in range(20):
try:
x_train, x_target = te_iterator.next()
except:
te_iterator = val_stream.get_epoch_iterator()
x_train, x_target = te_iterator.next()
x = input_transform(x_train)
t = target_transform(x_target)
cost = _get_cost(x,t)
g_cost, d_cost, enc_cost = cost
g_costs.append(g_cost)
d_costs.append(d_cost)
e_costs.append(enc_cost)
s= " ,".join(["test errors :", str(mn(g_costs)), str(mn(d_costs)), str(mn(e_costs))])
return s
iterator = tr_stream.get_epoch_iterator()
# you may wish to reset the learning rate to something of your choosing if you feel it is too high/low
lrt = sharedX(lr)
from time import time
n_updates = 0
t = time()
n_epochs = 200
print "STARTING"
for epoch in range(n_epochs):
    tm = time()
    g_costs = []
    d_costs = []
    e_costs = []
    ## TODO : produces pretty ugly output, redo this?
    for i in tqdm(range(num_samples/128)):
        # Restart the epoch iterator when exhausted (same bare-except
        # hack as in get_test_errors).
        try:
            x_train, x_target = iterator.next()
        except:
            iterator = tr_stream.get_epoch_iterator()
            x_train, x_target = iterator.next()
        x = input_transform(x_train)
        # NOTE(review): `t` shadows the start time captured above -- don't
        # reuse `t` as a timer later in this loop.
        t = target_transform(x_target)
        ## optional - change the criteria for how often we train the generator or discriminator
        # Alternate generator / discriminator updates 1:1.
        if n_updates % 2 == 1:
            cost = _train_g(x,t)
        else:
            cost = _train_d(x,t)
        # optional - only train the generator on MSE cost. If you want to only train the autoencoder, uncomment the
        # and comment the cost updates above
        #cost = _train_e(x,t)
        g_cost, d_cost, enc_cost = cost
        g_costs.append(g_cost)
        d_costs.append(d_cost)
        e_costs.append(enc_cost)
        # Every 100 updates: report running train/validation costs,
        # reset the running averages, and dump sample reconstructions.
        if n_updates % 100 == 0:
            s= " ,".join(["training errors :", str(mn(g_costs)), str(mn(d_costs)), str(mn(e_costs))])
            g_costs = []
            d_costs = []
            e_costs = []
            print get_test_errors()
            print s
            sys.stdout.flush()
            save_sample_pictures()
        n_updates += 1
    print "epoch %i of %i took %.2f seconds" %(epoch, n_epochs, time() - tm)
    ## optional - reduce the learning rate as you go
    #lrt.set_value(floatX(lrt.get_value() * 0.95))
    #print lrt.get_value()
    sys.stdout.flush()
import pickle
# Persist the learned parameters so the disabled "saved model" path near
# the top of the file can rebuild the network without retraining.
all_params = [e_params, g_params, d_params]
# Binary mode + context manager: guarantees the handle is flushed and
# closed (the original leaked an open text-mode file handle).
with open("faces_dcgan_denoising_165epoch_1encoding.pkl", 'wb') as f:
    pickle.dump(all_params, f)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: N.B. The code from the following imports is lifted from the original dcgan project
Step2: Data Stuff
Step3: Check data looks sensible
Step4: Setup Neural Network
Step5: The following methods are to help adjust the sizes of the convolutional layers in the generator and discriminator, which is very fiddly to do otherwise. The (overloaded) method make_conv_set can be used to create both the conv
Step6: Next we set up the various cost functions we need
Step7: Training code
Step8: Train Model
Step9: Save weights if wanted
|
2,544
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
# `memory` is presumably a joblib.Memory instance created in an earlier
# cell (cf. `memory2` below) -- .cache memoises results on disk, so the
# print statements only fire on a cache miss.
@memory.cache
def g(x):
    print('A long-running calculation, with parameter %s' % x)
    return np.hamming(x)
@memory.cache
def h(x):
    print('A second long-running calculation, using g(x)')
    return np.vander(x)
# First call computes and caches; the repeated g(3) below is served from
# the on-disk cache (no print output the second time).
a = g(3)
a
g(3)
h(a)
h(a)
# A second Memory store whose cached arrays are loaded memory-mapped
# (read-only) rather than copied into RAM.
cachedir2 = mkdtemp()
memory2 = Memory(cachedir=cachedir2, mmap_mode='r')
square = memory2.cache(np.square)
# `np.float` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# `float` is the documented drop-in replacement (same dtype).
a = np.vander(np.arange(3)).astype(float)
square(a)
# call it again
res = square(a)
res
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: using memmapping
|
2,545
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np

# Decode the bit string into an integer array, one element per character.
mystr = "100110"
result = np.array([int(ch) for ch in mystr])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,546
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_digits
digits = load_digits()
%matplotlib inline
import matplotlib.pyplot as plt
# Preview the first 64 digit images in a borderless 8x8 grid of axes.
fig = plt.figure(figsize=(6, 6)) # figure size in inches
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
# plot the digits: each image is 8x8 pixels
for i in range(64):
    ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
    ax.imshow(digits.images[i], cmap=plt.cm.binary, interpolation='nearest')
    # label the image with the target value
    ax.text(0, 7, str(digits.target[i]))
# RandomizedPCA was deprecated in scikit-learn 0.18 and removed in 0.20;
# PCA(svd_solver='randomized') is the documented drop-in replacement.
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver='randomized', random_state=1999)
# Project the 64-dimensional digit vectors onto two principal components
# and colour the scatter by the true digit label.
proj = pca.fit_transform(digits.data)
plt.scatter(proj[:, 0], proj[:, 1], c=digits.target)
plt.colorbar()
from sklearn.manifold import Isomap
# Nonlinear alternative to PCA: Isomap manifold embedding into 2-D,
# again coloured by the true digit label.
iso = Isomap(n_neighbors=5, n_components=2)
proj = iso.fit_transform(digits.data)
plt.scatter(proj[:, 0], proj[:, 1], c=digits.target)
plt.colorbar()
from sklearn.naive_bayes import GaussianNB
# sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
# split the data into training and validation sets
X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target, random_state=0)
# train the model
clf = GaussianNB()
clf.fit(X_train, y_train)
# use the model to predict the labels of the test data
predicted = clf.predict(X_test)
expected = y_test
# Re-plot the first 64 test digits, labelling each with its predicted
# class: green when it matches the true label, red when it does not.
fig = plt.figure(figsize=(6, 6)) # figure size in inches
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
# plot the digits: each image is 8x8 pixels
for i in range(64):
    ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
    ax.imshow(X_test.reshape(-1, 8, 8)[i], cmap=plt.cm.binary,
              interpolation='nearest')
    # label the image with the target value
    if predicted[i] == expected[i]:
        ax.text(0, 7, str(predicted[i]), color='green')
    else:
        ax.text(0, 7, str(predicted[i]), color='red')
# Accuracy by hand (fraction of matching predictions), then via
# the estimator's own score() method -- the two should agree.
matches = (predicted == expected)
print(matches.sum())
print(len(matches))
matches.sum() / float(len(matches))
print(clf.score(X_test, y_test))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We'll re-use some of our code from before to visualize the data and remind us what
Step2: Visualizing the Data
Step3: Here we see that the digits do cluster fairly well, so we can expect even
Step4: It can be fun to explore the various manifold learning methods available,
Step5: Question
Step6: Quantitative Measurement of Performance
Step7: We see that nearly 1500 of the 1800 predictions match the input. But there are other
|
2,547
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
iris = pd.read_csv('../datasets/iris_without_classes.csv') # Read the file 'datasets/iris_without_classes.csv'
# Print the first entries using the head() method to check that there is no Class information anymore
iris.head()
# Use PCA's fit_transform() method to reduce the dataset size to two dimensions
from sklearn.decomposition import PCA
RANDOM_STATE=1234
pca = PCA(n_components=2, random_state=RANDOM_STATE) # Create a PCA object with two components
iris_2d = pca.fit_transform(iris) # use fit_transform() to reduce the original dataset into two dimensions
# Create a scatterplot of the reduced dataset
import matplotlib.pyplot as plt
%matplotlib inline
# Create a scatterplot of the two dimensions of the transformed data
plt.scatter(iris_2d[:, 0], iris_2d[:, 1])
# Show the scatterplot
plt.show()
# Create two KMeans models: one with two clusters and another with three clusters
# Store the labels predicted by the KMeans models using two and three clusters
from sklearn.cluster import KMeans
model2 = KMeans(n_clusters=2, random_state=RANDOM_STATE).fit(iris) # Create a KMeans model expecting two clusters
labels2 = model2.predict(iris) # Predict the cluster label for each data point using predict()
model3 = KMeans(n_clusters=3, random_state=RANDOM_STATE).fit(iris) # Create a KMeans model expecting three clusters
labels3 = model3.predict(iris) # Predict the cluster label for each data point using predict()
# Plot the 2-clusters assignments using the reduced dataset. Use different colors for each cluster
# (boolean masks on `labels2` select the rows that belong to each cluster)
plt.scatter(iris_2d[labels2 == 0, 0], iris_2d[labels2 == 0, 1], color='red')
plt.scatter(iris_2d[labels2 == 1, 0], iris_2d[labels2 == 1, 1], color='blue')
# Show the scatterplot
plt.show()
# Plot the 3-clusters assignments using the reduced dataset. Use different colors for each cluster
plt.scatter(iris_2d[labels3 == 0, 0], iris_2d[labels3 == 0, 1], color='red')
plt.scatter(iris_2d[labels3 == 1, 0], iris_2d[labels3 == 1, 1], color='blue')
plt.scatter(iris_2d[labels3 == 2, 0], iris_2d[labels3 == 2, 1], color='green')
# Show the scatterplot
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reducing dimensions
Step2: How many distinct groups can you see?
|
2,548
|
<ASSISTANT_TASK:>
Python Code:
from pprint import pprint
from time import sleep
from pynq import PL
from pynq import Overlay
from pynq.drivers import Trace_Buffer
from pynq.iop import Pmod_TMP2
from pynq.iop import PMODA
from pynq.iop import PMODB
from pynq.iop import ARDUINO
ol = Overlay("base.bit")
ol.download()
tmp2 = Pmod_TMP2(PMODA)
tmp2.set_log_interval_ms(1)
tr_buf = Trace_Buffer(PMODA,pins=[2,3],probes=['SCL','SDA'],
protocol="i2c",rate=1000000)
# Start the trace buffer
tr_buf.start()
# Issue reads for 1 second
tmp2.start_log()
sleep(1)
tmp2_log = tmp2.get_log()
# Stop the trace buffer
tr_buf.stop()
# Set up samples
start = 500
stop = 3500
# Parsing and decoding samples
tr_buf.parse("i2c_trace.csv",start,stop)
tr_buf.decode("i2c_trace.pd")
tr_buf.display()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step 1
Step1: Step 2
Step2: Step 3
Step3: Step 4
Step4: Step 5
|
2,549
|
<ASSISTANT_TASK:>
Python Code:
# Import the FISSA toolbox
import fissa
# For plotting our results, import numpy and matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Fetch the colormap object for Cynthia Brewer's Paired color scheme
colors = plt.get_cmap("Paired")
# Define path to imagery and to the ROI set
images_location = "exampleData/20150529"
rois_location = "exampleData/20150429.zip"
# Call FISSA using the function-based interface
result, raw = fissa.run_fissa(images_location, rois_location)
# Plot sample trace
# Select the ROI and trial to plot
roi = 2
trial = 1
# Create the figure
plt.figure(figsize=(12, 6))
plt.plot(
raw[roi, trial][0, :],
lw=2,
label="Raw",
color=colors((roi * 2) % colors.N),
)
plt.plot(
result[roi, trial][0, :],
lw=2,
label="Decontaminated",
color=colors((roi * 2 + 1) % colors.N),
)
plt.title("ROI {}, Trial {}".format(roi, trial), fontsize=15)
plt.xlabel("Time (frame number)", fontsize=15)
plt.ylabel("Signal intensity (candela per unit area)", fontsize=15)
plt.grid()
plt.legend()
plt.show()
# Plot all ROIs and trials
# Get the number of ROIs and trials
n_roi = result.shape[0]
n_trial = result.shape[1]
# Find the maximum signal intensities for each ROI
roi_max_raw = [
np.max([np.max(raw[i_roi, i_trial][0]) for i_trial in range(n_trial)])
for i_roi in range(n_roi)
]
roi_max_result = [
np.max([np.max(result[i_roi, i_trial][0]) for i_trial in range(n_trial)])
for i_roi in range(n_roi)
]
roi_max = np.maximum(roi_max_raw, roi_max_result)
# Plot our figure using subplot panels
plt.figure(figsize=(16, 10))
for i_roi in range(n_roi):
for i_trial in range(n_trial):
# Make subplot axes
i_subplot = 1 + i_trial * n_roi + i_roi
plt.subplot(n_trial, n_roi, i_subplot)
# Plot the data
plt.plot(
raw[i_roi][i_trial][0, :],
label="Raw",
color=colors((i_roi * 2) % colors.N),
)
plt.plot(
result[i_roi][i_trial][0, :],
label="Decontaminated",
color=colors((i_roi * 2 + 1) % colors.N),
)
# Labels and boiler plate
plt.ylim([-0.05 * roi_max[i_roi], roi_max[i_roi] * 1.05])
if i_roi == 0:
plt.ylabel(
"Trial {}\n\nSignal intensity\n(candela per unit area)".format(
i_trial + 1
)
)
if i_trial == 0:
plt.title("ROI {}".format(i_roi))
plt.legend()
if i_trial == n_trial - 1:
plt.xlabel("Time (frame number)")
plt.show()
# Get the number of neuropil/surrounding regions.
# The raw data has the raw ROI signal in raw[:, :][0] and raw surround
# signals in the rest of the 3rd dimension.
nRegions = raw[0, 0].shape[0] - 1
# Select the ROI and trial to plot
roi = 2
trial = 1
# Create the figure
plt.figure(figsize=(12, 12))
# Plot extracted traces for each neuropil subregion
plt.subplot(2, 1, 1)
# Plot trace of raw ROI signal
plt.plot(
raw[roi, trial][0, :],
lw=2,
label="Raw ROI signal",
color=colors((roi * 2) % colors.N),
)
# Plot traces from each neuropil region
for i_neuropil in range(1, nRegions + 1):
alpha = i_neuropil / nRegions
plt.plot(
raw[roi, trial][i_neuropil, :],
lw=2,
label="Neuropil region {}".format(i_neuropil),
color="k",
alpha=alpha,
)
plt.ylim([0, 125])
plt.grid()
plt.legend()
plt.ylabel("Signal intensity (candela per unit area)", fontsize=15)
plt.title("ROI {}, Trial {}, neuropil region traces".format(roi, trial), fontsize=15)
# Plot the ROI signal
plt.subplot(2, 1, 2)
# Plot trace of raw ROI signal
plt.plot(raw[roi, trial][0, :], lw=2, label="Raw", color=colors((roi * 2) % colors.N))
# Plot decontaminated signal matched to the ROI
plt.plot(
result[roi, trial][0, :],
lw=2,
label="Decontaminated",
color=colors((roi * 2 + 1) % colors.N),
)
plt.ylim([0, 125])
plt.grid()
plt.legend()
plt.xlabel("Time (frame number)", fontsize=15)
plt.ylabel("Signal intensity (candela per unit area)", fontsize=15)
plt.title("ROI {}, Trial {}, raw and decontaminated".format(roi, trial), fontsize=15)
plt.show()
sample_frequency = 10 # Hz
deltaf_result, deltaf_raw = fissa.run_fissa(
images_location, rois_location, freq=sample_frequency, return_deltaf=True
)
# Plot sample trace
# Select the ROI and trial to plot
roi = 2
trial = 1
# Create the figure
plt.figure(figsize=(12, 6))
plt.plot(
deltaf_raw[roi, trial][0, :],
lw=2,
label="Raw",
color=colors((roi * 2) % colors.N),
)
plt.plot(
deltaf_result[roi, trial][0, :],
lw=2,
label="Decontaminated",
color=colors((roi * 2 + 1) % colors.N),
)
plt.title("ROI {}, Trial {}".format(roi, trial), fontsize=15)
plt.xlabel("Time (frame number)", fontsize=15)
plt.ylabel(r"$\Delta f\,/\,f_0$", fontsize=15)
plt.grid()
plt.legend()
plt.show()
# Get the number of ROIs and trials
n_roi = result.shape[0]
n_trial = result.shape[1]
# Find the maximum signal intensities for each ROI,
# so we can scale ylim the same across subplots
roi_max = [
np.max([np.max(result[i_roi, i_trial][0]) for i_trial in range(n_trial)])
for i_roi in range(n_roi)
]
# Plot our figure using subplot panels
plt.figure(figsize=(16, 10))
for i_roi in range(n_roi):
for i_trial in range(n_trial):
# Make subplot axes
i_subplot = 1 + i_trial * n_roi + i_roi
plt.subplot(n_trial, n_roi, i_subplot)
# Plot the data
plt.plot(
result[i_roi][i_trial][0, :],
color=colors((i_roi * 2 + 1) % colors.N),
)
# Labels and boiler plate
plt.ylim([-0.05 * roi_max[i_roi], roi_max[i_roi] * 1.05])
if i_roi == 0:
plt.ylabel("Trial {}\n\n".format(i_trial + 1) + r"$\Delta f\,/\,f_0$")
if i_trial == 0:
plt.title("ROI {}".format(i_roi))
if i_trial == n_trial - 1:
plt.xlabel("Time (frame number)")
plt.show()
# Plot all ROIs and trials
# Get the number of ROIs and trials
n_roi = deltaf_result.shape[0]
n_trial = deltaf_result.shape[1]
# Find the maximum signal intensities for each ROI
roi_max_raw = [
np.max([np.max(deltaf_raw[i_roi, i_trial][0]) for i_trial in range(n_trial)])
for i_roi in range(n_roi)
]
roi_max_result = [
np.max([np.max(deltaf_result[i_roi, i_trial][0]) for i_trial in range(n_trial)])
for i_roi in range(n_roi)
]
roi_max = np.maximum(roi_max_raw, roi_max_result)
# Plot our figure using subplot panels
plt.figure(figsize=(16, 10))
for i_roi in range(n_roi):
for i_trial in range(n_trial):
# Make subplot axes
i_subplot = 1 + i_trial * n_roi + i_roi
plt.subplot(n_trial, n_roi, i_subplot)
# Plot the data
plt.plot(
deltaf_raw[i_roi][i_trial][0, :],
label="Raw",
color=colors((i_roi * 2) % colors.N),
)
plt.plot(
deltaf_result[i_roi][i_trial][0, :],
label="Decontaminated",
color=colors((i_roi * 2 + 1) % colors.N),
)
# Labels and boiler plate
plt.ylim([-0.05 * roi_max[i_roi], roi_max[i_roi] * 1.05])
if i_roi == 0:
plt.ylabel("Trial {}\n\n".format(i_trial + 1) + r"$\Delta f\,/\,f_0$")
if i_trial == 0:
plt.title("ROI {}".format(i_roi))
plt.legend()
if i_trial == n_trial - 1:
plt.xlabel("Time (frame number)")
plt.show()
# Define the folder where FISSA's outputs will be cached, so they can be
# quickly reloaded in the future without having to recompute them.
#
# This argument is optional; if it is not provided, FISSA will not save its
# results for later use.
#
# If the output directory already exists, FISSA will load the contents of
# the cache instead of recomputing it.
#
# Note: you *must* use a different folder for each experiment, otherwise
# FISSA will load the existing data instead of computing results for the
# new experiment.
#
# In this example, we will use the current datetime as the name of the
# experiment, but you can name your experiments however you want to.
# If you want to take advantage of the caching of results, you should use
# a more descriptive name than this so you can identify the actual
# dataset that the FISSA results correspond to, and load them appropriately.
import datetime
output_folder = "fissa-example_{}".format(
datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
print(output_folder)
# Run FISSA, saving to results to output_folder
result, raw = fissa.run_fissa(images_location, rois_location, folder=output_folder)
# Run FISSA, loading results from cache in output_folder
result, raw = fissa.run_fissa(images_location, rois_location, folder=output_folder)
result, raw = fissa.run_fissa(
images_location, rois_location, folder=output_folder, export_to_matfile=True
)
result, raw = fissa.run_fissa(
images_location, rois_location, export_to_matfile="experiment_results.mat"
)
# Call FISSA with elevated verbosity
result = fissa.run_fissa(images_location, rois_location, verbosity=2)
# FISSA uses multiprocessing to speed up its processing.
# By default, it will spawn one worker per CPU core on your machine.
# However, if you have a lot of cores and not much memory, you may not
# be able to support so many workers simultaneously.
# In particular, this can be problematic during the data preparation step
# in which TIFFs are loaded into memory.
# The default number of cores for the data preparation and separation steps
# can be changed as follows.
ncores_preparation = 4 # If None, uses all available cores
ncores_separation = None # if None, uses all available cores
# By default, FISSA uses 4 subregions for the neuropil region.
# If you have very dense data with a lot of different signals per unit area,
# you may wish to increase the number of regions.
n_regions = 8
# By default, each surrounding region has the same area as the central ROI.
# i.e. expansion = 1
# However, you may wish to increase or decrease this value.
expansion = 0.75
# The degree of signal sparsity can be controlled with the alpha parameter.
alpha = 0.02
# If you change the experiment parameters, you need to change the cache directory too.
# Otherwise FISSA will try to reload the results from the previous run instead of
# computing the new results. FISSA will throw an error if you try to load data which
# was generated with different analysis parameters to the current parameters.
output_folder2 = output_folder + "_alt"
# Run FISSA with these parameters
result, raw = fissa.run_fissa(
images_location,
rois_location,
output_folder2,
nRegions=n_regions,
expansion=expansion,
alpha=alpha,
ncores_preparation=ncores_preparation,
ncores_separation=ncores_separation,
)
n_roi = result.shape[0]
n_trial = result.shape[1]
i_roi = 3
plt.figure(figsize=(12, 6))
for i_trial in range(n_trial):
plt.plot(result[i_roi, i_trial][0, :], label="Trial {}".format(i_trial + 1))
plt.title("ROI {}".format(i_roi), fontsize=15)
plt.xlabel("Time (Frame number)", fontsize=15)
plt.ylabel("Signal intensity (candela per unit area)", fontsize=15)
plt.grid()
plt.legend()
plt.show()
result = fissa.run_fissa(images_location, rois_location, ncores_preparation=1)
result, raw = fissa.run_fissa(images_location, rois_location, lowmemory_mode=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We also need to import some plotting dependencies which we'll make use in this notebook to display the results.
Step2: Running FISSA
Step3: The function-based interface is very straight forward, but note that you can only access the result which is returned by the function.
Step4: Let's next plot the traces across all ROIs and trials.
Step5: Comparing ROI signal to neuropil region signals
Step6: df/f<sub>0</sub>
Step7: Note that by default, f<sub>0</sub> is determined as the minimum across all trials (all TIFFs) to ensure that results are directly comparable between trials, but you can normalise each trial individually instead if you prefer by providing the parameter deltaf_across_trials=False.
Step8: Since FISSA is very good at removing contamination from the ROI signals, the minimum value on the decontaminated trace will typically be 0..
Step9: For comparison purposes, we can also plot the df/f<sub>0</sub> for the raw data against the decontaminated signal.
Step10: Caching
Step11: Let's run FISSA on this experiment again, but this time save the results to the experiment's output directory.
Step12: A subsequent call to fissa.run_fissa which uses the same experiment folder argument will load the cached data instead of re-running the FISSA signal separation routine from scratch.
Step13: Exporting to MATLAB
Step14: Alternatively, we can export to a matfile with a custom file name by setting the export_to_matfile argument to the target path.
Step15: Loading the generated file (e.g. "output_folder/separated.mat") in MATLAB will provide you with all of FISSA's outputs.
Step16: Analysis parameters
Step17: We can plot the new results for our example trace from before. Although we doubled the number of neuropil regions around the cell, very little has changed for this example because there were not many sources of contamination.
Step18: Working with very large tiff files
Step19: Alternatively, you can activate FISSA's low memory mode.
|
2,550
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import pandas as pd
import lmfit
from fretbursts import *
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
%config InlineBackend.figure_format='retina' # for hi-dpi displays
sns.set_style('whitegrid')
#bsearch_ph_sel = 'AND-gate'
bsearch_ph_sel = 'Dex'
data_file = 'results/usALEX-5samples-PR-raw-%s.csv' % bsearch_ph_sel
data_raw = pd.read_csv(data_file).set_index('sample')
data_raw[['E_gauss_w', 'E_kde_w', 'E_gauss_w_err', 'E_gauss_w_fiterr', 'n_bursts_all', 'n_bursts_fret']]
data_file = 'results/usALEX-5samples-E-corrected-all-ph.csv'
data_corr = pd.read_csv(data_file).set_index('sample')
data_corr[['E_gauss_w', 'E_kde_w', 'E_gauss_w_err', 'E_gauss_w_fiterr', 'n_bursts_all', 'n_bursts_fret']]
raw_data_file_sna = 'results/alix/us-ALEX SNA Results 2016-10-12.csv'
rsna = pd.read_csv(raw_data_file_sna, index_col=0)
rsna
sna = rsna[['<E>', 'Mode']].round(4)
sna.columns = ['SNA_E_mean', 'SNA_E_max']
sna
data_file_sna = 'results/usALEX-5samples-E-SNA.csv'
sna.to_csv(data_file_sna)
leakage_coeff_fname = 'results/usALEX - leakage coefficient DexDem.csv'
leakage = np.loadtxt(leakage_coeff_fname)
print('Leakage coefficient:', leakage)
dir_ex_coeff_fname = 'results/usALEX - direct excitation coefficient dir_ex_aa.csv'
dir_ex_aa = np.loadtxt(dir_ex_coeff_fname)
print('Dir. excitation AA:', dir_ex_aa)
dir_ex_t_datasheet_fname = 'results/Dyes - ATT0647N-ATTO550 abs X-section ratio at 532nm.csv'
dir_ex_t_datasheet = np.loadtxt(dir_ex_t_datasheet_fname)
print('Direct excitation (dir_ex_t) from datasheet:', dir_ex_t_datasheet)
gamma_coeff_fname = 'results/usALEX - gamma factor - all-ph.csv'
gamma = np.loadtxt(gamma_coeff_fname)
print('Gamma factor:', gamma)
beta_coeff_fname = 'results/usALEX - beta factor - all-ph.csv'
beta = np.loadtxt(beta_coeff_fname)
print('Beta factor:', beta)
dir_ex_t_beta = dir_ex_aa * beta
'%.5f' % dir_ex_t_beta
with open('results/usALEX - direct excitation coefficient dir_ex_t beta.csv', 'w') as f:
f.write('%.5f' % dir_ex_t_beta)
PR_corr_kde = fretmath.correct_E_gamma_leak_dir(data_raw.E_kde_w,
leakage=leakage,
dir_ex_t=dir_ex_t_beta,
gamma=gamma)*100
PR_corr_kde
PR_corr_gauss = fretmath.correct_E_gamma_leak_dir(data_raw.E_gauss_w,
leakage=leakage,
dir_ex_t=dir_ex_t_beta,
gamma=gamma)*100
PR_corr_gauss
dir_ex_t_datasheet
E_datasheet = fretmath.correct_E_gamma_leak_dir(data_raw.E_kde_w,
leakage=leakage,
dir_ex_t=dir_ex_t_datasheet,
gamma=gamma)*100
E_datasheet
out = data_corr[['E_kde_w']].copy()*100
out.columns = ['E_alex']
out['E_datasheet'] = E_datasheet
out
out.plot(alpha=0.4, lw=3, style=dict(E_alex='-ob', E_datasheet='-sr'));
def residuals_absolute(params, E_raw, E_ref):
    """Absolute residuals between reference E and E corrected with the fitted dir_ex_t.

    Used as an lmfit objective; `leakage` and `gamma` are module-level
    correction coefficients loaded earlier in the notebook.
    """
    E_model = fretmath.correct_E_gamma_leak_dir(
        E_raw,
        leakage=leakage,
        gamma=gamma,
        dir_ex_t=params['dir_ex_t'].value)
    return E_ref - E_model
def residuals_relative(params, E_raw, E_ref):
    """Relative residuals (normalized by E_ref) for the dir_ex_t fit.

    Same model as `residuals_absolute`, but each residual is divided by the
    reference value so low-E samples weigh as much as high-E ones.
    """
    E_model = fretmath.correct_E_gamma_leak_dir(
        E_raw,
        leakage=leakage,
        gamma=gamma,
        dir_ex_t=params['dir_ex_t'].value)
    return (E_ref - E_model) / E_ref
params = lmfit.Parameters()
params.add('dir_ex_t', value=0.05)
m = lmfit.minimize(residuals_absolute, params, args=(data_raw.E_kde_w, data_corr.E_kde_w))
lmfit.report_fit(m.params, show_correl=False)
m = lmfit.minimize(residuals_relative, params, args=(data_raw.E_kde_w, data_corr.E_kde_w))
lmfit.report_fit(m.params, show_correl=False)
'%.5f' % m.params['dir_ex_t'].value
with open('results/usALEX - direct excitation coefficient dir_ex_t fit.csv', 'w') as f:
f.write('%.5f' % m.params['dir_ex_t'].value)
PR_corr_kde_dfit = fretmath.correct_E_gamma_leak_dir(data_raw.E_kde_w,
leakage=leakage,
dir_ex_t=m.params['dir_ex_t'].value,
gamma=gamma)*100
PR_corr_kde_dfit.name = 'PR_corr_kde_dfit'
PR_corr_kde_dfit
E = pd.concat([data_corr[['E_kde_w', 'E_gauss_w']]*100, PR_corr_kde, PR_corr_gauss, sna*100], axis=1)
E.columns = ['E KDE', 'E Gauss', 'PR KDE', 'PR Gauss', 'SNA E mean', 'SNA E max']
E
E.plot.bar(table=np.round(E, 2).T)
plt.ylabel('FRET (%)')
plt.gca().xaxis.set_visible(False)
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.);
Eerr = (pd.DataFrame().assign(
E_gauss_w_err = data_corr.E_gauss_w_err, E_gauss_w_fiterr = data_corr.E_gauss_w_fiterr,
PR_gauss_w_err = data_raw.E_gauss_w_err, PR_gauss_w_fiterr = data_raw.E_gauss_w_fiterr,
SNA_SDV=rsna['SDV(E)']))*100
Eerr
nbursts = data_corr[['n_bursts_all', 'n_bursts_fret']]
nbursts
E[['PR KDE', 'PR Gauss', 'E KDE']].plot(kind='bar')
E[['PR KDE', 'PR Gauss', 'E KDE']].plot(lw=3);
print('Max error E_alex vs E_corr_pr: %.2f' % (E['E KDE'] - E['PR KDE']).abs().max())
print('Max error E_alex vs E_beta: %.2f' % (E['E KDE'] - E['PR Gauss']).abs().max())
print('Max error E_beta vs E_corr_pr: %.2f' % (E['PR Gauss'] - E['PR KDE']).abs().max())
x = [int(idx[:-1]) for idx in out.index]
plt.plot(x, 'E KDE', data=E)
plt.plot(x, 'PR KDE', data=E)
plt.plot(x, 'PR Gauss', data=E)
plt.xlabel('Distance in base-pair')
plt.ylabel('FRET');
E['E KDE'] - E['PR KDE']
E.to_csv('results/usALEX-5samples-E-all-methods.csv', float_format='%.3f')
E.round(3)
Eerr.to_csv('results/usALEX-5samples-E-all-methods_errors.csv', float_format='%.4f')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Raw PR
Step2: These are the RAW proximity ratios for the 5 samples (only background correction, no leakage nor direct excitation)
Step3: Columns legend
Step4: Columns legend
Step5: Load μs-ALEX corrections
Step6: Procedure 1
Step7: With this coefficient, computing the corrected $E$ for the 5 dsDNA samples
Step8: Procedure 2
Step9: With this the corrected $E$ for the 5 dsDNA samples are
Step10: Comparing these values with the ones obtained fitting the
Step11: NOTE
Step12: NOTE
Step13: Corrected E
Step14: Estimated errors
Step15: Note on standard errors
Step16: NOTE
|
2,551
|
<ASSISTANT_TASK:>
Python Code:
import random
n = 10
data = [random.randint(1, 10) for _ in range(n)]
data # this print out the variable's content
def nsqrt(x): # do not change the heading of the function
    """Return the integer square root of a non-negative integer x.

    That is, the largest integer r such that r*r <= x, found by binary
    search in O(log x) multiplications (no floating point, so exact for
    arbitrarily large ints).
    """
    if x < 0:
        raise ValueError("nsqrt() is undefined for negative numbers")
    lo, hi = 0, x + 1  # invariant: lo*lo <= x < hi*hi
    while lo + 1 < hi:
        mid = (lo + hi) // 2
        if mid * mid <= x:
            lo = mid
        else:
            hi = mid
    return lo
print(nsqrt(11), nsqrt(1369))
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import math
def f(x):
    # Root-finding target: x * ln(x) - 16 (root where x*ln(x) == 16).
    value = x * math.log(x)
    return value - 16.0
xvals = np.arange(0.01, 10, 0.01)
yvals = np.array([f(x) for x in xvals])
plt.plot(xvals, yvals)
plt.plot(xvals, 0*xvals)
plt.show()
def fprime(x):
    # Analytic derivative of f: d/dx [x*ln(x) - 16] = ln(x) + 1.
    return math.log(x) + 1.0
'''
x_0: initial guess
EPSILON: stop when abs(x - x_new) < EPSILON
MAX_ITER: maximum number of iterations
NOTE: you must use the default values of the above parameters, do not change them
'''
def find_root(f, fprime, x_0=1.0, EPSILON = 1E-7, MAX_ITER = 1000): # do not change the heading of the function
    """Find a root of f using Newton's method.

    Iterates x_new = x - f(x)/f'(x) starting from x_0, stopping when
    abs(x - x_new) < EPSILON or after MAX_ITER iterations (whichever
    comes first).  Returns the last iterate.
    """
    x = x_0
    for _ in range(MAX_ITER):
        x_new = x - f(x) / fprime(x)
        if abs(x - x_new) < EPSILON:
            return x_new
        x = x_new
    return x
x = find_root(f, fprime)
print(x)
print(f(x))
comb(6, 3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1
Step2: You can test your implementation using the following code.
Step3: Exercise 2
Step4: To find $x$ for the equation, we need to compute the derivative of $f(x)$, i.e., $f'(x)$ first.
Step5: Now you need to implement Newton's method below.
Step6: You can test your implementation using the following code.
Step7: Exercise 3
|
2,552
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
from sklearn.metrics import mean_absolute_error
import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import shutil
%matplotlib inline
def generate_sequence(length, period, decay):
    """Sample a damped sine wave: 0.5 + 0.5*sin(2*pi*i/period)*exp(-decay*i)."""
    samples = []
    for i in range(length):
        oscillation = math.sin(2 * math.pi * i / period)
        envelope = math.exp(-decay * i)
        samples.append(0.5 + 0.5 * oscillation * envelope)
    return samples
def generate_data(num_samples, input_length, output_length):
    """Build (X, y) training pairs of damped sine waves with random period/decay.

    X has shape (num_samples, input_length, 1); y has shape
    (num_samples, output_length) and holds the continuation of each wave.
    """
    inputs, targets = [], []
    for _ in range(num_samples):
        period = np.random.randint(low=10, high=20)
        decay = np.random.uniform(0.01, 0.1)
        wave = generate_sequence(input_length + output_length, period, decay)
        inputs.append(wave[:-output_length])
        targets.append(wave[-output_length:])
    X = np.array(inputs).reshape(num_samples, input_length, 1)
    y = np.array(targets).reshape(num_samples, output_length)
    return X, y
X, y = generate_data(5, 20, 5)
print(X.shape, y.shape)
for i in range(X.shape[0]):
plt.plot([x for x in X[i, :, 0]] + [x for x in y[i]], marker="o")
plt.axvline(x=20, color="k", linestyle="--")
plt.show()
NUM_TRAIN_SAMPLES = 1000
NUM_VAL_SAMPLES = 10
NUM_TEST_SAMPLES = 100
INPUT_SEQ_LEN = 50
OUTPUT_SEQ_LEN = 5
INPUT_DIM = 1
HIDDEN_DIM = 20
NUM_LAYERS = 2
OUTPUT_DIM = 5
LEARNING_RATE = 1e-3
BATCH_SIZE = 10
NUM_EPOCHS = 10
DATA_DIR = "../../data"
MODEL_FILE = os.path.join(DATA_DIR, "torch-07-dsw-pred-{:d}.model")
Xtrain, ytrain = generate_data(NUM_TRAIN_SAMPLES, INPUT_SEQ_LEN, OUTPUT_SEQ_LEN)
Xval, yval = generate_data(NUM_VAL_SAMPLES, INPUT_SEQ_LEN, OUTPUT_SEQ_LEN)
Xtest, ytest = generate_data(NUM_TEST_SAMPLES, INPUT_SEQ_LEN, OUTPUT_SEQ_LEN)
print(Xtrain.shape, ytrain.shape, Xval.shape, yval.shape, Xtest.shape, ytest.shape)
class DampedSineWavePredictor(nn.Module):
    """Stacked LSTM followed by a linear read-out of the last time step."""

    def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
        super(DampedSineWavePredictor, self).__init__()
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Randomly initialised hidden and cell states, one pair per layer;
        # moved to the GPU only when CUDA is available.
        state_shape = (self.num_layers, x.size(0), self.hidden_dim)
        h = torch.randn(*state_shape)
        c = torch.randn(*state_shape)
        if torch.cuda.is_available():
            h, c = h.cuda(), c.cuda()
        h0 = (Variable(h), Variable(c))
        out, h0 = self.lstm(x, h0)
        # Feed only the final time step through the fully-connected layer.
        return self.fc(out[:, -1, :])
model = DampedSineWavePredictor(INPUT_DIM, HIDDEN_DIM, NUM_LAYERS, OUTPUT_DIM)
if torch.cuda.is_available():
model.cuda()
print(model)
loss_fn = nn.L1Loss()
# loss_fn = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
history = []
for epoch in range(NUM_EPOCHS):
num_batches = Xtrain.shape[0] // BATCH_SIZE
shuffled_indices = np.random.permutation(np.arange(Xtrain.shape[0]))
train_loss = 0.
for bid in range(num_batches):
Xbatch_data = Xtrain[shuffled_indices[bid * BATCH_SIZE : (bid+1) * BATCH_SIZE]]
ybatch_data = ytrain[shuffled_indices[bid * BATCH_SIZE : (bid+1) * BATCH_SIZE]]
Xbatch = Variable(torch.from_numpy(Xbatch_data).float())
ybatch = Variable(torch.from_numpy(ybatch_data).float())
if torch.cuda.is_available():
Xbatch = Xbatch.cuda()
ybatch = ybatch.cuda()
# initialize gradients
optimizer.zero_grad()
# forward
ybatch_ = model(Xbatch)
loss = loss_fn(ybatch_, ybatch)
# backward
loss.backward()
if torch.cuda.is_available():
train_loss += loss.cpu().data[0]
else:
train_loss += loss.data[0]
optimizer.step()
# compute training loss and accuracy
train_loss /= num_batches
# compute validation loss and accuracy
num_val_batches = Xval.shape[0] // BATCH_SIZE
val_loss = 0.
for bid in range(num_val_batches):
Xbatch_data = Xval[bid * BATCH_SIZE : (bid+1) * BATCH_SIZE]
ybatch_data = yval[bid * BATCH_SIZE : (bid+1) * BATCH_SIZE]
Xbatch = Variable(torch.from_numpy(Xbatch_data).float())
ybatch = Variable(torch.from_numpy(ybatch_data).float())
if torch.cuda.is_available():
Xbatch = Xbatch.cuda()
ybatch = ybatch.cuda()
ybatch_ = model(Xbatch)
loss = loss_fn(ybatch_, ybatch)
if torch.cuda.is_available():
val_loss += loss.cpu().data[0]
else:
val_loss += loss.data[0]
val_loss /= num_val_batches
torch.save(model.state_dict(), MODEL_FILE.format(epoch+1))
print("Epoch {:2d}/{:d}: loss={:.3f}, val_loss={:.3f}"
.format((epoch+1), NUM_EPOCHS, train_loss, val_loss))
history.append((train_loss, val_loss))
train_losses = [x[0] for x in history]
val_losses = [x[1] for x in history]
plt.plot(train_losses, color="b", label="train")
plt.plot(val_losses, color="r", label="val")
plt.legend(loc="best")
plt.xlabel("epochs")
plt.ylabel("loss (MAE)")
plt.show()
saved_model = DampedSineWavePredictor(INPUT_DIM, HIDDEN_DIM, NUM_LAYERS, OUTPUT_DIM)
saved_model.load_state_dict(torch.load(MODEL_FILE.format(1)))
if torch.cuda.is_available():
saved_model.cuda()
ylabels, ypreds = [], []
num_test_batches = Xtest.shape[0] // BATCH_SIZE
for bid in range(num_test_batches):
Xbatch_data = Xtest[bid * BATCH_SIZE : (bid + 1) * BATCH_SIZE]
ybatch_data = ytest[bid * BATCH_SIZE : (bid + 1) * BATCH_SIZE]
Xbatch = Variable(torch.from_numpy(Xbatch_data).float())
ybatch = Variable(torch.from_numpy(ybatch_data).float())
if torch.cuda.is_available():
Xbatch = Xbatch.cuda()
ybatch = ybatch.cuda()
Ybatch_ = saved_model(Xbatch)
if torch.cuda.is_available():
ylabels.extend(ybatch.cpu().data.numpy())
ypreds.extend(ybatch_.cpu().data.numpy())
else:
ylabels.extend(ybatch.data.numpy())
ypreds.extend(ybatch_.data.numpy())
test_mae = mean_absolute_error(ypreds, ylabels)
print("MAE on test set: {:.5f}".format(test_mae))
samples = np.random.randint(0, Xtest.shape[0], 3)
for i in range(len(samples)):
plt.subplot(1, 3, (i+1))
label = ylabels[samples[i]]
pred = ypreds[samples[i]]
xs = np.arange(len(label))
plt.ylim([0., 1.])
plt.plot(xs, label, color="b", marker="o", label="y")
plt.plot(xs, pred, color="orange", marker="o", label="yhat")
plt.legend(loc="best")
plt.tight_layout()
plt.show()
for i in range(NUM_EPOCHS):
os.remove(MODEL_FILE.format(i + 1))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualize Input
Step2: Define Constants
Step3: Generate Training Data
Step4: Define Network
Step5: Train Network
Step6: Test Model
|
2,553
|
<ASSISTANT_TASK:>
Python Code:
environment_directory = "environments/"
identifier = "test_all_methods"
log_directory = "log/"
if not os.path.exists('log'):
os.makedirs('log')
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for run: %s', environment_directory + identifier + ".xml")
#tree = ET.parse('environments/test_all_methods.xml')
tree = ET.parse('environments/solow_parameters.xml')
root = tree.getroot()
# all item attributes
print('All parameters: ')
for elem in root:
print(elem.attrib)
environment = Environment(environment_directory, identifier)
runner = Runner(environment)
for i in range(int(environment.num_simulations)):
logging.info(' STARTED with run %s', str(i))
environment.initialize(environment_directory, identifier)
runner.initialize(environment)
runner.do_run(environment)
logging.info(' DONE')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then, after a log folder is created, if it doesn't exist, the logger will be initialized. The log files will contain information about how the simulation went.
Step2: Before initializing the model, let's inspect the parameters that are used to initialize it.
Step3: First, the environment will be initialized. It contains the model parameters and variables. The environment also contains a list of agents. In the Solow model, the environment contains one bank, one firm, and one household. Finally, it also contains initialized transactions for these agents
Step4: Next up, the runner object is created. As the name suggests, the runner object handles the running of the model. But that is not all, it also initializes the updater.
Step5: Now you can actually run the model. Since the agent-based model is simulated using Monte Carlo methods. This means that the model will be run multiple times (num_simulations). For every simulation run, the environment and runner will be (re-)initialized. Then, the runner will run the Solow model. During the run, the runner will write data about capital to a csv file specified within the Measurement class and print out all the transactions happening and the books of firm and household at the end of each sweep.
|
2,554
|
<ASSISTANT_TASK:>
Python Code:
# Build a synthetic geological model (5 layers, one fold, two faults) with
# pynoddy, compute it, and attach per-unit geophysical "features" sampled
# from a multivariate normal distribution.  Python 2 notebook code
# (print statements, builtin reload); %-lines are IPython magics.
from IPython.core.display import HTML
css_file = 'pynoddy.css'
HTML(open(css_file, "r").read())
import sys, os
import matplotlib.pyplot as plt
# adjust some settings for matplotlib
from matplotlib import rcParams
# print rcParams
rcParams['font.size'] = 15
# determine path of repository to set paths correctly below
repo_path = os.path.realpath('../..')
import pynoddy.history
%matplotlib inline
reload(pynoddy.history)
reload(pynoddy.events)
reload(pynoddy)
# File names for the Noddy history (input) and computed output.
history = "feature_model.his"
output_name = "feature_out"
nm = pynoddy.history.NoddyHistory()
# add stratigraphy
strati_options = {'num_layers' : 5,
                  'layer_names' : ['layer 1', 'layer 2', 'layer 3',
                                   'layer 4', 'layer 5'],
                  'layer_thickness' : [1500, 500, 500, 1500, 500]}
nm.add_event('stratigraphy', strati_options )
# A single fold event superimposed on the stratigraphy.
fold_options = {'name' : 'Fold',
                'pos' : (4000, 3500, 5000),
                'amplitude' : 200,
                'dip_dir' : 135.0,
                'wavelength' : 5000}
nm.add_event('fold', fold_options)
# The following options define the fault geometry:
fault_options = {'name' : 'Fault_W',
                 'pos' : (4000, 3500, 5000),
                 'dip_dir' : 90,
                 'dip' : 60,
                 'slip' : 1000}
nm.add_event('fault', fault_options)
# The following options define the fault geometry:
fault_options = {'name' : 'Fault_E',
                 'pos' : (6000, 3500, 5000),
                 'dip_dir' : 270,
                 'dip' : 60,
                 'slip' : 1000}
nm.add_event('fault', fault_options)
nm.write_history(history)
# Change cube size (reload the history and recompute at 100 m resolution).
nm1 = pynoddy.history.NoddyHistory(history)
nm1.change_cube_size(100)
nm1.write_history(history)
# Compute the model
pynoddy.compute_model(history, output_name)
# Plot output
import pynoddy.output
reload(pynoddy.output)
nout = pynoddy.output.NoddyOutput(output_name)
nout.plot_section('x', layer_labels = strati_options['layer_names'][::-1],
                  colorbar = True, title="",
                  savefig = False, fig_filename = "ex01_faults_combined.eps",
                  cmap = 'YlOrRd') # note: YlOrRd colourmap should be suitable for colorblindness!
nout.export_to_vtk(vtk_filename = "feature_model")
import os
history_file = os.path.join(repo_path, "examples/GBasin_Ve1_V4.his")
his_gipps = pynoddy.history.NoddyHistory(history_file)
his_gipps.events[2].properties
import numpy as np
np.unique(nout.block)
# Covariance matrix shared by all units when drawing the three features.
cov = [[0.08, 0.0, 0.],
       [0.0, 0.01, 0.],
       [0., 0., 0.05]]
# define mean values for features
feature1_means = [1.0, 1.5, 1.2, 1.0, 1.2]
feature2_means = [5.1, 5.2, 5.12, 5.0, 5.1]
feature3_means = [1.0, 1.2, 1.4, 1.6, 1.8]
# resort into unit means (one [f1, f2, f3] mean vector per geological unit)
means_units = [[m1, m2, m3] for m1, m2, m3 in
               zip(feature1_means, feature2_means, feature3_means)]
print means_units, np.array(cov)**0.5
print np.sqrt(cov)
f1, f2, f3 = np.random.multivariate_normal(means_units[0], cov, 1000).T
# Sums the block values where block == 1.0; this equals the voxel count
# only because those values are exactly 1.0.
n1 = int(np.sum(nout.block[nout.block == 1.0]))
# sample for geological unit 1
f1, f2, f3 = np.random.multivariate_normal(means_units[0], cov, n1).T
tmp = np.copy(nout.block)
tmp[tmp == 1.0] = f1
# NOTE(review): modern matplotlib only accepts origin='lower'/'upper';
# 'lower_left' may raise on newer versions -- confirm target version.
plt.imshow(tmp[0,:,:].T, origin = 'lower_left', interpolation = 'nearest')
# create empty feature fields:
feature_field_1 = np.copy(nout.block)
feature_field_2 = np.copy(nout.block)
feature_field_3 = np.copy(nout.block)
# Replace each unit id in the copied block by random draws from that
# unit's feature distribution (same draw count as voxels of the unit).
for unit_id in np.unique(nout.block):
    print unit_id
    n_tmp = int(np.sum(nout.block == unit_id))
    f1, f2, f3 = np.random.multivariate_normal(means_units[int(unit_id-1)], cov, n_tmp).T
    feature_field_1[feature_field_1 == unit_id] = f1
    feature_field_2[feature_field_2 == unit_id] = f2
    feature_field_3[feature_field_3 == unit_id] = f3
# Export feature fields to VTK (via pynoddy output file)
nout.block = feature_field_1
nout.export_to_vtk(vtk_filename = "feature_field_1")
nout.block = feature_field_2
nout.export_to_vtk(vtk_filename = "feature_field_2")
nout.block = feature_field_3
nout.export_to_vtk(vtk_filename = "feature_field_3")
# write to feature file for Jack (CSV: voxel index plus the three features)
feature_file = open("features_midres_2.csv", 'w')
feature_file.write("x, y, z, f1, f2, f3\n")
for zz in range(nout.nz):
    for yy in range(nout.ny):
        for xx in range(nout.nx):
            feature_file.write("%d, %d, %d, %.5f, %.5f, %.5f\n" %
                               (xx, yy, zz, feature_field_1[xx, yy, zz],
                                feature_field_2[xx, yy, zz], feature_field_3[xx, yy, zz]))
feature_file.close()
# Export labels: one "x, y, z, label" CSV row per voxel of the computed model.
nout = pynoddy.output.NoddyOutput(output_name)
with open("labels.csv", 'w') as label_file:
    label_file.write("x, y, z, label\n")
    for zz in range(nout.nz):
        for yy in range(nout.ny):
            for xx in range(nout.nx):
                # Bug fix: the original format string "%d, %d, %d, %df\n"
                # appended a stray literal 'f' to every label (e.g. "3f"),
                # which breaks numeric parsing of the label column.
                label_file.write("%d, %d, %d, %d\n" %
                                 (xx, yy, zz, nout.block[xx, yy, zz]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Adding features to geological layers
Step2: ok, seems to work - now for all
|
2,555
|
<ASSISTANT_TASK:>
Python Code:
# Plot the 1-D objective f(x) = (1.11 + 1.11*x**-0.18) / (1 - x) on
# [0.05, 0.15] to visualise where its minimum lies.
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# NOTE(review): in a plain module a __future__ import must be the first
# statement; this only works because it is a notebook cell context.
from __future__ import division
%config InlineBackend.figure_formats=['svg']
%matplotlib inline
plt.rc('pdf',fonttype=3) # for proper subsetting of fonts
plt.rc('axes',linewidth=0.5) # thin axes; the default for lines is 1pt
# 500 evaluation points across the plotting interval.
al = np.linspace( 0.05, 0.15, 500)
plt.plot(al, (1.11 + 1.11*al**(-0.18))/(1 - al), 'k')
plt.axis([0.05, 0.15, 3.09,3.17])
plt.title("Objective Function")
plt.ylabel("f")
plt.xlabel("x/D")
plt.show()
#Equal Interval Search
#Erin Schmidt
#Adapted, with significant modification, from Arora et al.'s APOLLO
#implementation found in "Introduction to Optimum Design" 1st Ed. (1989).
import numpy as np
def func(al, count):
    """Evaluate the objective f(al) = (1.11 + 1.11*al**-0.18) / (1 - al).

    Increments and returns *count*, the running tally of objective
    evaluations, so the search routines can report total function calls.
    """
    numerator = 1.11 + 1.11 * al ** (-0.18)
    return numerator / (1 - al), count + 1
def mini(au, al, count):
    """Evaluate the objective at the midpoint of the final bracket [al, au].

    Returns (f, alpha, count): the function value at the midpoint, the
    midpoint itself, and the updated evaluation counter.
    """
    midpoint = 0.5 * (au + al)
    f_val, count = func(midpoint, count)
    return f_val, midpoint, count
def equal(delta, epsilon, count, al):
    """Equal-interval search for the minimum of func() starting at al.

    delta   -- initial step size for the interval sweep
    epsilon -- convergence tolerance on the bracket width (au - al)
    count   -- running tally of objective-function evaluations
    al      -- lower bound / starting alpha

    Returns (f, alpha, count): minimum function value, its location, and
    the updated evaluation count.
    """
    (f, count) = func(al, count)
    fl = f #function value at lower bound
    #delta = 0.01 #step-size
    #au = 0.15 #alpha upper bound
    # Phase 1: shrink delta by 10x until the first step (alpha = delta)
    # moves downhill relative to the lower bound.
    while True:
        aa = delta
        (f, count) = func(aa, count)
        fa = f
        if fa > fl:
            delta = delta * 0.1
        else:
            break
    # Phase 2: march forward in steps of delta until f rises again,
    # establishing the initial bracket [al, au] around the minimum.
    while True:
        au = aa + delta
        (f, count) = func(au, count)
        fu = f
        if fa > fu:
            al = aa
            aa = au
            fl = fa
            fa = fu
        else:
            break
    # Phase 3: while the bracket is wider than epsilon, shrink delta by
    # 10x and re-sweep inside the bracket; when tight enough, report the
    # midpoint via mini().
    while True:
        if (au - al) > epsilon: #compares interval size to convergence criteria
            delta = delta * 0.1
            aa = al #intermediate alpha
            fa = fl #intermediate alpha function value
            while True:
                au = aa + delta
                (f, count) = func(au, count)
                fu = f
                if fa > fu:
                    al = aa
                    aa = au
                    fl = fa
                    fa = fu
                    continue
                else:
                    break
            continue
        else:
            # Bracket is tight enough; mini() evaluates f at its midpoint.
            (f, alpha, count) = mini(au, al, count)
            return f, alpha, count
#run the program: equal-interval search from al = 0.01 with a 0.01 step
# and a 1e-3 bracket-width tolerance; report the optimum and call count.
delta = 0.01
epsilon = 1E-3
count = 0
al = 0.01 # alpha lower bound
(f, alpha, count) = equal(delta, epsilon, count, al)
print('The minimum is at {:.4f}'.format(alpha))
print('The function value at the minimum = {:.4f}'.format(f))
print('Total number of function calls = {}'.format(count))
# Polynomial approximation (4-point cubic)
# -Erin Schmidt
import numpy as np
from math import sqrt
# make an array of 4 random sample points for the cubic fit.
# NOTE(review): np.random.sample(4) is uniform on [0, 1), so
# 0.05 + sample*0.15 actually spans [0.05, 0.20), not 0.05-0.15 as the
# original comment claimed; 0.05 + sample*0.10 would match that intent.
x = (0.05 + np.random.sample(4)*0.15)
# make an array of function values at the 4 points of x
def f(x): # the objective function
    """Objective f(x) = (1.11 + 1.11*x**(-0.18)) / (1 - x)."""
    top = 1.11 + 1.11 * x ** (-0.18)
    return top / (1 - x)
# Evaluate the objective at the 4 random sample points.
f_array = []
i = 0
while i <= len(x) - 1:
    f_array.append(f(x[i]))
    i += 1
# use the equations from Vanderplaats 1984 to solve coefficients of the
# interpolating cubic a0 + a1*x + a2*x^2 + a3*x^3 through the four
# (x, f) samples; q1..q6 are intermediate determinant terms.
q1 = x[2]**3 * (x[1] - x[0]) - x[1]**3 * (x[2] - x[0]) + x[0]**3 * (x[2] - x[1])
q2 = x[3]**3 * (x[1] - x[0]) - x[1]**3 * (x[3] - x[0]) + x[0]**3 * (x[3] - x[1])
q3 = (x[2] - x[1]) * (x[1] - x[0]) * (x[2] - x[0])
q4 = (x[3] - x[1]) * (x[1] - x[0]) * (x[3] - x[0])
q5 = f_array[2] * (x[1] - x[0]) - f_array[1] * (x[2] - x[0]) + f_array[0] * (x[2] - x[1])
q6 = f_array[3] * (x[1] - x[0]) - f_array[1] * (x[3] - x[0]) + f_array[0] * (x[3] - x[1])
a3 = (q3*q6 - q4*q5)/(q2*q3 - q1*q4)
a2 = (q5 - a3*q1)/q3
a1 = (f_array[1] - f_array[0])/(x[1] - x[0]) - \
     a3*(x[1]**3 - x[0]**3)/(x[1] - x[0]) - a2*(x[0] + x[1])
a0 = f_array[0] - a1*x[0] - a2*x[0]**2 - a3*x[0]**3
a = [a1, 2*a2, 3*a3] #coefficients of f'
# find the zeros of the f' polynomial (using the quadratic formula)
# NOTE(review): sqrt(b) raises ValueError if b < 0 (complex roots).
b = a2**2 - 3*a1*a3
X1 = (-a2 + sqrt(b))/(3*a3)
X2 = (-a2 - sqrt(b))/(3*a3)
print('roots = ', X1, X2)
# plot the results: cubic approximation (dashed) vs true objective (solid)
plt.rc('pdf',fonttype=3) # for proper subsetting of fonts
plt.rc('axes',linewidth=0.5) # thin axes; the default for lines is 1pt
x = np.linspace( 0.05, 0.15, 500)
plt.plot(x, a0 +a1*x + a2*x**2 + a3*x**3, 'k--', label='Poly. approx.')
plt.plot(x, (1.11 + 1.11*x**(-0.18))/(1 - x), 'k', label='Objective func.')
plt.axis([0.05, 0.15, 3.09,3.17])
legend = plt.legend(loc='upper center', shadow=False, fontsize='large')
plt.ylabel("f")
plt.xlabel("x/D")
plt.show()
# Roots collected from repeated runs (new random samples each time);
# their standard deviation measures sensitivity to the sampling.
poly_root = [0.097620387704, 0.0985634827486, 0.0969340736066, \
             0.098775097463, 0.102426814371, 0.101638472077, \
             0.0991941169039, 0.095873175811]
print('polynomial root std. deviation = ', np.std(poly_root))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can see, at least qualitatively, from the plot of the objective function that on the interval $0.05 < \alpha < 0.15$ the optimum value lies somewhere between 0.09 and 0.10, and the function evaluated in that range has an average value of about 3.10.
Step2: Polynomial Approximation
|
2,556
|
<ASSISTANT_TASK:>
Python Code:
# Four variants of fitting the line y = 2x + 1 with a single Dense unit.
# Only the first two data points are used for training; predicting on all
# five shows how the learned line extrapolates.
# --- Variant 1: plain Keras Sequential + fit() -----------------------------
from tensorflow import keras
import numpy
x = numpy.array([0, 1, 2, 3, 4])
y = x * 2 + 1
model = keras.models.Sequential()
model.add(keras.layers.Dense(1,input_shape=(1,)))
model.compile('SGD', 'mse')
model.fit(x[:2], y[:2], epochs=1000, verbose=0)
print(model.predict(x))
# --- Variant 2: manual training loop with tf.GradientTape ------------------
import tensorflow as tf2
import numpy as np
x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1,1)
y = x * 2 + 1
model = tf2.keras.Sequential()
model.add(tf2.keras.layers.Dense(1, input_dim = 1))
model.build()
Optimizer = tf2.keras.optimizers.Adam(learning_rate = 0.01)
for epoch in range(1000):
    with tf2.GradientTape() as tape:
        y_pr = model(x[:2,:1])
        loss = tf2.keras.losses.mean_squared_error(y[:2,:1], y_pr)
    # Gradients are computed outside the tape context, then applied.
    gradients = tape.gradient(loss, model.trainable_variables)
    Optimizer.apply_gradients(zip(gradients, model.trainable_variables))
print(model.predict(x))
# --- Variant 3: same loop, printing the initial weights and the first
# three epochs' predictions/loss for inspection ------------------------------
import tensorflow as tf2
import numpy as np
x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1,1)
y = x * 2 + 1
model = tf2.keras.Sequential()
model.add(tf2.keras.layers.Dense(1, input_dim = 1))
model.build()
# Initial (randomly initialised) kernel and bias of the Dense layer.
print('w=', model.trainable_variables[0].numpy(), 'b=', model.trainable_variables[1].numpy())
print()
Optimizer = tf2.keras.optimizers.Adam(learning_rate = 0.01)
for epoch in range(1000):
    with tf2.GradientTape() as tape:
        y_pr = model(x[:2,:1])
        loss = tf2.keras.losses.mean_squared_error(y[:2,:1], y_pr)
        if epoch < 3:
            print(f'Epoch:{epoch}')
            print('y_pr:', y_pr.numpy())
            print('y_tr:', y[:2,:1])
            print('loss:', loss.numpy())
            print()
    gradients = tape.gradient(loss, model.trainable_variables)
    Optimizer.apply_gradients(zip(gradients, model.trainable_variables))
print(model.predict(x))
# --- Variant 4: subclassed keras.Model with the same training loop ---------
import tensorflow as tf2
from tensorflow import keras
import numpy as np
x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1,1)
y = x * 2 + 1
class Model(keras.models.Model):
    # Single Dense unit, so the model can represent y = w*x + b.
    def __init__(self):
        super().__init__()
        # self.layer = keras.layers.Dense(1, input_shape=[None,1])
        self.layer = keras.layers.Dense(1, input_dim=1)
    def call(self, x):
        # Forward pass: delegate directly to the single Dense layer.
        return self.layer(x)
model = Model()
Optimizer = tf2.keras.optimizers.Adam(learning_rate = 0.01)
for epoch in range(1000):
    with tf2.GradientTape() as tape:
        y_pr = model(x[:2,:1])
        loss = tf2.keras.losses.mean_squared_error(y[:2,:1], y_pr)
    gradients = tape.gradient(loss, model.trainable_variables)
    Optimizer.apply_gradients(zip(gradients, model.trainable_variables))
print(model.predict(x))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: II. 케라스 인터페이스를 사용하는 텐서플로 2.0 사용법(Tensorflow 2.0 with Keras IO)
Step2: 간단한 구성에 진행 결과 보이기
Step3: 클래스를 이용한 네트웍 모델 구성하기
|
2,557
|
<ASSISTANT_TASK:>
Python Code:
# Minimal SED-ML round trip: write a one-reaction model in Antimony,
# convert to SBML, describe the experiment in phraSED-ML, convert that to
# SED-ML, validate with libsedml, and execute with Tellurium.
from __future__ import print_function
import tellurium as te
te.setDefaultPlottingEngine('matplotlib')
%matplotlib inline
import phrasedml
# One-reaction decay model (S1 -> S2) written in Antimony.
antimony_str = '''
model myModel
S1 -> S2; k1*S1
S1 = 10; S2 = 0
k1 = 1
end
'''
# phraSED-ML description of the simulation experiment to run on the model.
phrasedml_str = '''
model1 = model "myModel"
sim1 = simulate uniform(0, 5, 100)
task1 = run sim1 on model1
plot "Figure 1" time vs S1, S2
'''
# create the sedml xml string from the phrasedml
sbml_str = te.antimonyToSBML(antimony_str)
phrasedml.setReferencedSBML("myModel", sbml_str)
sedml_str = phrasedml.convertString(phrasedml_str)
# NOTE(review): PEP 8 prefers "is None" over "== None".
if sedml_str == None:
    print(phrasedml.getLastPhrasedError())
print(sedml_str)
# Write both documents to a temporary working directory so the SED-ML
# file can reference the SBML model on disk.
import tempfile, os, shutil
workingDir = tempfile.mkdtemp(suffix="_sedml")
sbml_file = os.path.join(workingDir, 'myModel')
sedml_file = os.path.join(workingDir, 'sed_main.xml')
with open(sbml_file, 'wb') as f:
    f.write(sbml_str.encode('utf-8'))
    f.flush()
    print('SBML written to temporary file')
with open(sedml_file, 'wb') as f:
    f.write(sedml_str.encode('utf-8'))
    f.flush()
    print('SED-ML written to temporary file')
# Validate the generated SED-ML with libsedml before executing it.
import libsedml
sedml_doc = libsedml.readSedML(sedml_file)
n_errors = sedml_doc.getErrorLog().getNumFailsWithSeverity(libsedml.LIBSEDML_SEV_ERROR)
print('Read SED-ML file, number of errors: {}'.format(n_errors))
if n_errors > 0:
    print(sedml_doc.getErrorLog().toString())
# execute SED-ML using Tellurium
te.executeSEDML(sedml_str, workingDir=workingDir)
# clean up
#shutil.rmtree(workingDir)
# Repressilator example: fetch BioModels entry BIOMD0000000012 and register
# its SBML with phrasedml so the study description below can reference it.
from __future__ import print_function
import tellurium as te, tellurium.temiriam as temiriam
te.setDefaultPlottingEngine('matplotlib')
%matplotlib inline
import phrasedml
# Get SBML from URN and set for phrasedml
# (getSBMLFromBiomodelsURN performs a network fetch of the model)
urn = "urn:miriam:biomodels.db:BIOMD0000000012"
sbml_str = temiriam.getSBMLFromBiomodelsURN(urn=urn)
phrasedml.setReferencedSBML('BIOMD0000000012', sbml_str)
# <SBML species>
#   PX - LacI protein
#   PY - TetR protein
#   PZ - cI protein
#   X - LacI mRNA
#   Y - TetR mRNA
#   Z - cI mRNA
# <SBML parameters>
#   ps_a - tps_active: Transcription from free promoter in transcripts per second and promoter
#   ps_0 - tps_repr: Transcription from fully repressed promoter in transcripts per second and promoter
# Build the phraSED-ML study description.  Bug fix: the triple-quote
# delimiters were missing, leaving a SyntaxError; the text below is one
# string whose {} placeholder is filled with the SBML model id.  The
# trailing backslashes are line continuations for the long plot statement.
phrasedml_str = """
model1 = model "{}"
model2 = model model1 with ps_0=1.3E-5, ps_a=0.013
sim1 = simulate uniform(0, 1000, 1000)
task1 = run sim1 on model1
task2 = run sim1 on model2
# A simple timecourse simulation
plot "Figure 1.1 Timecourse of repressilator" task1.time vs task1.PX, task1.PZ, task1.PY
# Applying preprocessing
plot "Figure 1.2 Timecourse after pre-processing" task2.time vs task2.PX, task2.PZ, task2.PY
# Applying postprocessing
plot "Figure 1.3 Timecourse after post-processing" task1.PX/max(task1.PX) vs task1.PZ/max(task1.PZ), \
     task1.PY/max(task1.PY) vs task1.PX/max(task1.PX), \
     task1.PZ/max(task1.PZ) vs task1.PY/max(task1.PY)
""".format('BIOMD0000000012')
# convert to SED-ML
sedml_str = phrasedml.convertString(phrasedml_str)
if sedml_str == None:
    # Conversion failed: surface phrasedml's error message.
    raise RuntimeError(phrasedml.getLastError())
# Run the SED-ML file with results written in workingDir
import tempfile, shutil, os
workingDir = tempfile.mkdtemp(suffix="_sedml")
# write out SBML so the SED-ML document can resolve its model reference
with open(os.path.join(workingDir, 'BIOMD0000000012'), 'wb') as f:
    f.write(sbml_str.encode('utf-8'))
te.executeSEDML(sedml_str, workingDir=workingDir)
shutil.rmtree(workingDir)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading / Executing SED-ML
Step3: SED-ML L1V2 specification example
|
2,558
|
<ASSISTANT_TASK:>
Python Code:
# Train an LSTM sequence autoencoder on padded 20-dimensional feature
# sequences, save/reload the model, and extract an intermediate layer's
# activations.  Python 2 code (cPickle, old Keras API with nb_epoch).
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn
seaborn.set()
import cPickle
import numpy as np
from keras import backend as K
from keras.models import Sequential, model_from_yaml
from keras.layers.recurrent import LSTM
from keras.layers.core import Activation, Dense, Dropout, RepeatVector
from keras.layers.wrappers import TimeDistributed
from keras.preprocessing import sequence
import yaml
import os
# Read data
# NOTE(review): yaml.load without an explicit Loader and cPickle.load can
# both execute arbitrary code -- only use on trusted local files.
config = yaml.load(open(os.path.join(os.path.expanduser("~"), ".blackbird", "config.yaml")).read())
seq_features = cPickle.load(open(config["data"]["features"], "rb"))
weights_file = config["data"]["model"]["weights"]
arch_file = config["data"]["model"]["arch"]
output_layer = int(config["data"]["model"]["output"])
# Pad/truncate every sequence to maxlen timesteps of 20 features each.
maxlen = 150
X = np.empty((len(seq_features), maxlen, 20))
for idx, key in enumerate(seq_features):
    X[idx, :, :] = sequence.pad_sequences(seq_features[key], maxlen=maxlen, dtype="float32").T
# Create model: LSTM encoder -> 20-d bottleneck -> repeated vector ->
# LSTM decoder -> per-timestep Dense, trained to reconstruct the input.
model = Sequential()
model.add(LSTM(64, return_sequences=False, input_shape=(maxlen, 20), go_backwards=True))
model.add(Dropout(0.5))
model.add(Dense(20))
model.add(Activation("tanh"))
model.add(RepeatVector(maxlen))
model.add(Dropout(0.5))
model.add(LSTM(64, return_sequences=True, go_backwards=True))
model.add(TimeDistributed(Dense(20)))
model.compile(loss="mse", optimizer="adam")
# Train (autoencoder: target equals input)
history = model.fit(X, X, batch_size=128, nb_epoch=500, validation_split=0.2, verbose=1)
# Use the validation loss curve to stop at a good solution
plt.figure(figsize=(14, 5))
plt.plot(history.history["loss"], label="Training loss")
plt.plot(history.history["val_loss"], label="Validation loss")
plt.legend()
plt.show()
# Save architecture and weights (keeping a .backup of any existing files)
if os.path.isfile(weights_file):
    os.rename(weights_file, weights_file + ".backup")
if os.path.isfile(arch_file):
    os.rename(arch_file, arch_file + ".backup")
# Save things
open(arch_file, "w").write(model.to_yaml())
model.save_weights(weights_file)
# Load model
model = model_from_yaml(open(arch_file).read())
model.load_weights(weights_file)
# Function to predict output of the configured intermediate layer;
# the second input (learning_phase) 0 selects inference mode.
predict = K.function([model.layers[0].input, K.learning_phase()],
                     model.layers[output_layer].output)
# Predict output
test_X = predict([X, 0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read data
Step2: Train
Step3: Load previous model
|
2,559
|
<ASSISTANT_TASK:>
Python Code:
# Particle Image Velocimetry pipeline with OpenPIV: cross-correlate an
# image pair, validate/repair the vector field, scale to physical units,
# and plot the result over the first frame.
from openpiv import tools, pyprocess, validation, filters, scaling
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import imageio
# Load the two exposures of the particle image pair.
frame_a = tools.imread( '../../examples/test1/exp1_001_a.bmp' )
frame_b = tools.imread( '../../examples/test1/exp1_001_b.bmp' )
fig,ax = plt.subplots(1,2,figsize=(12,10))
ax[0].imshow(frame_a,cmap=plt.cm.gray);
ax[1].imshow(frame_b,cmap=plt.cm.gray);
winsize = 32 # pixels, interrogation window size in frame A
searchsize = 38 # pixels, search area size in frame B
overlap = 17 # pixels, 50% overlap
dt = 0.02 # sec, time interval between the two frames
# Cross-correlation PIV: one (u, v) vector plus a signal-to-noise ratio
# per interrogation window.
u0, v0, sig2noise = pyprocess.extended_search_area_piv(
    frame_a.astype(np.int32),
    frame_b.astype(np.int32),
    window_size=winsize,
    overlap=overlap,
    dt=dt,
    search_area_size=searchsize,
    sig2noise_method='peak2peak',
)
# Window-centre coordinates for plotting the vector field.
x, y = pyprocess.get_coordinates(
    image_size=frame_a.shape,
    search_area_size=searchsize,
    overlap=overlap,
)
# Mark vectors whose signal-to-noise ratio falls below the threshold.
u1, v1, mask = validation.sig2noise_val(
    u0, v0,
    sig2noise,
    threshold = 1.05,
)
# Replace the flagged outliers by iterative local-mean inpainting.
u2, v2 = filters.replace_outliers(
    u1, v1,
    method='localmean',
    max_iter=3,
    kernel_size=3,
)
# convert x,y to mm
# convert u,v to mm/sec
x, y, u3, v3 = scaling.uniform(
    x, y, u2, v2,
    scaling_factor = 96.52, # 96.52 pixels/millimeter
)
# 0,0 shall be bottom left, positive rotation rate is counterclockwise
x, y, u3, v3 = tools.transform_coordinates(x, y, u3, v3)
tools.save(x, y, u3, v3, mask, 'exp1_001.txt' )
fig, ax = plt.subplots(figsize=(8,8))
tools.display_vector_field(
    'exp1_001.txt',
    ax=ax, scaling_factor=96.52,
    scale=50, # scale defines here the arrow length
    width=0.0035, # width is the thickness of the arrow
    on_img=True, # overlay on the image
    image_name='../../examples/test1/exp1_001_a.bmp',
);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading images
Step2: Processing
Step3: The function get_coordinates finds the center of each interrogation window. This will be useful later on when plotting the vector field.
Step4: Post-processing
Step5: Another useful function is replace_outliers, which will find outlier vectors, and substitute them by an average of neighboring vectors. The larger the kernel_size the larger is the considered neighborhood. This function uses an iterative image inpainting algorithm. The amount of iterations can be chosen via max_iter.
Step6: Next, we are going to convert pixels to millimeters, and flip the coordinate system such that the origin becomes the bottom left corner of the image.
Step7: Results
Step8: Finally, the vector field can be plotted with display_vector_field.
|
2,560
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import sys
import os
import shutil
import numpy as np
from subprocess import check_output
# Import flopy
import flopy
# Set the name of the path to the model working directory
dirname = "P4-5_Hubbertville"
datapath = os.getcwd()
modelpath = os.path.join(datapath, dirname)
print 'Name of model path: ', modelpath
# Now let's check if this directory exists. If not, then we will create it.
if os.path.exists(modelpath):
print 'Model working directory already exists.'
else:
print 'Creating model working directory.'
os.mkdir(modelpath)
# model domain and grid definition
# for clarity, user entered variables are all caps; python syntax are lower case or mixed case
# In a contrast to P4.1 and P4.2, this is an areal 2D model
LX = 4500.
LY = 11000. # note that there is an added 500m on the top and bottom to represent the boundary conditions,that leaves an aqufier lenght of 10000 m
ZTOP = 1030. # the system is unconfined so set the top above land surface so that the water table never > layer top
ZBOT = 980.
NLAY = 1
NROW = 22
NCOL = 9
DELR = LX / NCOL # recall that MODFLOW convention is DELR is along a row, thus has items = NCOL; see page XXX in AW&H (2015)
DELC = LY / NROW # recall that MODFLOW convention is DELC is along a column, thus has items = NROW; see page XXX in AW&H (2015)
DELV = (ZTOP - ZBOT) / NLAY
BOTM = np.linspace(ZTOP, ZBOT, NLAY + 1)
HK = 50.
VKA = 1.
RCH = 0.001
WELLQ = 0. #recall MODFLOW convention, negative means pumped out of the model domain (=aquifer)
print "DELR =", DELR, " DELC =", DELC, ' DELV =', DELV
print "BOTM =", BOTM
print "Recharge =", RCH
print "Pumping well rate =", WELLQ
# Assign name and create modflow model object
modelname = 'P4-5'
#exe_name = os.path.join(datapath, 'mfnwt.exe') # for Windows OS
exe_name = os.path.join(datapath, 'mfnwt') # for Mac OS
print 'Model executable: ', exe_name
MF = flopy.modflow.Modflow(modelname, exe_name=exe_name, model_ws=modelpath)
# Create the discretization object
TOP = ZTOP * np.ones((NROW, NCOL),dtype=np.float)
DIS_PACKAGE = flopy.modflow.ModflowDis(MF, NLAY, NROW, NCOL, delr=DELR, delc=DELC,
top=TOP, botm=BOTM[1:], laycbd=0)
# print DIS_PACKAGE #uncomment this on far left to see information about the flopy object
# Variables for the BAS package
IBOUND = np.ones((NLAY, NROW, NCOL), dtype=np.int32) # all nodes are active (IBOUND = 1)
# In this problem we have no specified heads
print IBOUND
STRT = 1000 * np.ones((NLAY, NROW, NCOL), dtype=np.float32) # P4.5a starting head =1000 m throughout model domain
print STRT
BAS_PACKAGE = flopy.modflow.ModflowBas(MF, ibound=IBOUND, strt=STRT)
# print BAS_PACKAGE # uncomment this at far left to see the information about the flopy BAS object
LPF_PACKAGE = flopy.modflow.ModflowUpw(MF, laytyp=1, hk=HK, vka=VKA) # we defined the K and anisotropy at top of file
# print LPF_PACKAGE # uncomment this at far left to see the information about the flopy LPF object
STRESS_PERIOD_DATA = [
[0, 0, 0, -2500], #layer, row, column, Specified flux Q from P4.3 northern BC
[0, 0, 1, -2500], #remember Python indexing is zero based
[0, 0, 2, -2500],
[0, 0, 3, -2500],
[0, 0, 4, -2500],
[0, 0, 5, -2500],
[0, 0, 6, -2500],
[0, 0, 7, -2500],
[0, 0, 8, -2500],
[0, 21, 0, -2500], #layer, row, column, Specified flux Q from P4.3 southern BC
[0, 21, 1, -2500], #remember Python indexing is zero based
[0, 21, 2, -2500],
[0, 21, 3, -2500],
[0, 21, 4, -2500],
[0, 21, 5, -2500],
[0, 21, 6, -2500],
[0, 21, 7, -2500],
[0, 21, 8, -2500]]
WEL_PACKAGE = flopy.modflow.ModflowWel(MF, stress_period_data=STRESS_PERIOD_DATA) # remember python 0 index, layer 0 = layer 1 in MF
#print WEL_PACKAGE # uncomment this at far left to see the information about the flopy WEL object
OC_PACKAGE = flopy.modflow.ModflowOc(MF) # we'll use the defaults for the model output
# print OC_PACKAGE # uncomment this at far left to see the information about the flopy OC object
NWT_PACKAGE = flopy.modflow.ModflowNwt(MF, headtol=0.001, fluxtol=5000, maxiterout=900)
# print NWT_PACKAGE # uncomment this at far left to see the information about the flopy PCG object
#In Problem P4.3 the River and Swamp were specified head BCs. MODFLOW does not include recharge added to a
#a specified head node in its water budget. Now that we've changed them to active nodes (IBOUND=1) these would receive
#recharge, and our model would be out of balance if we simply plugged in the BC fluxes. Try it, you should see that
#perimeter boundary fluxes calculated from P4.3 < recharge specified in P4.5). Therefore, we'll adjust the recharge
#array so that the perimeter BC nodes are given a recharge rate = 0.
RCH_ARRAY = RCH * np.ones((NROW, NCOL), dtype=np.float32) # set the variable RCH specifed above for model domain
RCH_ARRAY[0, :] = 0 #don't forget arrays are zero-based!
RCH_ARRAY[-1, :] = 0 #-1 is Python for last in array
print RCH_ARRAY
RCH_PACKAGE = flopy.modflow.ModflowRch(MF, rech=RCH_ARRAY)
# print RCH_PACKAGE # uncomment this at far left to see the information about the flopy RCH object
#Before writing input, destroy all files in folder to prevent reusing old files
#Here's the working directory
print modelpath
#Here's what's currently in the working directory
modelfiles = os.listdir(modelpath)
print modelfiles
#delete these files to prevent us from reading old results
modelfiles = os.listdir(modelpath)
for filename in modelfiles:
f = os.path.join(modelpath, filename)
if modelname in f:
try:
os.remove(f)
print 'Deleted: ', filename
except:
print 'Unable to delete: ', filename
#Now write the model input files
MF.write_input()
# return current working directory
print "You can check the newly created files in", modelpath
silent = False #Print model output to screen?
pause = False #Require user to hit enter? Doesn't mean much in Ipython notebook
report = True #Store the output from the model in buff
success, buff = MF.run_model(silent=silent, pause=pause, report=report)
#imports for plotting and reading the MODFLOW binary output file
import matplotlib.pyplot as plt
import flopy.utils.binaryfile as bf
#Create the headfile object and grab the results for last time.
headfile = os.path.join(modelpath, modelname + '.hds')
headfileobj = bf.HeadFile(headfile)
#Get a list of times that are contained in the model
times = headfileobj.get_times()
print 'Headfile (' + modelname + '.hds' + ') contains the following list of times: ', times
#Get a numpy array of heads for totim = 1.0
#The get_data method will extract head data from the binary file.
HEAD = headfileobj.get_data(totim=1.0)
#Print statistics on the head
print 'Head statistics'
print ' min: ', HEAD.min()
print ' max: ', HEAD.max()
print ' std: ', HEAD.std()
#Create a contour plot of heads
FIG = plt.figure(figsize=(15,13))
#setup contour levels and plot extent
LEVELS = np.arange(989., 1005., 0.5)
EXTENT = (DELR/2., LX - DELR/2., DELC/2., LY - DELC/2.)
print 'Contour Levels: ', LEVELS
print 'Extent of domain: ', EXTENT
#Make a contour plot on the first axis
AX1 = FIG.add_subplot(1, 2, 1, aspect='equal')
AX1.set_xlabel("x")
AX1.set_ylabel("y")
YTICKS = np.arange(0, 11000, 500)
AX1.set_yticks(YTICKS)
AX1.set_title("Hubbertville contour map")
AX1.text(2000, 10500, r"River", fontsize=10, color="blue")
AX1.text(1800, 340, r"Green Swamp", fontsize=10, color="green")
AX1.contour(np.flipud(HEAD[0, :, :]), levels=LEVELS, extent=EXTENT)
#Make a color flood on the second axis
AX2 = FIG.add_subplot(1, 2, 2, aspect='equal')
AX2.set_xlabel("x")
AX2.set_ylabel("y")
AX2.set_yticks(YTICKS)
AX2.set_title("Hubbertville color flood")
AX2.text(2000, 10500, r"River", fontsize=10, color="black")
AX2.text(1800, 340, r"Green Swamp", fontsize=10, color="black")
cax = AX2.imshow(HEAD[0, :, :], extent=EXTENT, interpolation='nearest')
cbar = FIG.colorbar(cax, orientation='vertical', shrink=0.45)
#look at the head in column = 4 from headobj, and then plot it
#print HEAD along a column; COL is a variable that allows us to change this easily
COL = 4
print HEAD[0,:,COL]
# we see this is what we want, but is flipped because MODFLOW's array does not = Python, so we reverse the order (flip them) and call it
Y = np.flipud(HEAD[0,:,COL])
print Y
#for our cross section create X-coordinates to match with heads
XCOORD = np.arange(0, 11000, 500) + 250
print XCOORD
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(1, 1, 1)
TITLE = 'cross section of head along Column = ({0})'.format(COL)
ax.set_title(TITLE)
ax.set_xlabel('y')
ax.set_ylabel('head')
ax.set_xlim(0, 11000.)
ax.set_ylim(980.,1020.)
ax.text(10480, 998, r"River", fontsize=10, color="blue",rotation='vertical')
ax.text(300, 998, r"Green Swamp", fontsize=10, color="green",rotation='vertical')
ax.text(5300,1009., r"Groundwater Divide", fontsize=10, color="black",rotation='vertical')
ax.plot(XCOORD, Y)
#in order to keep the problem unconfined we'll have to raise the top above what was set in P4.3 (=1030 m)
ZTOP = 2030.
STRT = 2000 * np.ones((NLAY, NROW, NCOL), dtype=np.float32) # P4.5a starting head=2000 m throughout model domain
print STRT
#we have to update the MODFLOW's BAS Package with the new IBOUND array
BAS_PACKAGE = flopy.modflow.ModflowBas(MF, ibound=IBOUND, strt=STRT)
#in order to keep the problem unconfined we'll have to raise the top above what was set in P4.3 (=1030 m)
TOP = 2030.
#in order to keep the same transmissivity we need to raise the base so the resulting saturated thickness is the same
BOT = 1980.
DIS_PACKAGE = flopy.modflow.ModflowDis(MF, NLAY, NROW, NCOL, delr=DELR, delc=DELC,
top=TOP, botm=BOT, laycbd=0)
# print DIS_PACKAGE #uncomment this on far left to see information about the flopy object
#delete these files to prevent us from reading old results
modelfiles = os.listdir(modelpath)
for filename in modelfiles:
f = os.path.join(modelpath, filename)
if modelname in f:
try:
os.remove(f)
print 'Deleted: ', filename
except:
print 'Unable to delete: ', filename
#Now write the model input files
MF.write_input()
print "New MODFLOW input files = ", modelfiles
print "You can check the newly created files in", modelpath
#rerun MODFLOW-NWT
silent = False #Print model output to screen?
pause = False #Require user to hit enter? Doesn't mean much in Ipython notebook
report = True #Store the output from the model in buff
success, buff = MF.run_model(silent=silent, pause=pause, report=report)
#As before, let's look at the results and compare to P4-3 Part a.
#imports for plotting and reading the MODFLOW binary output file
import matplotlib.pyplot as plt
import flopy.utils.binaryfile as bf
#Create the headfile object and grab the results for last time.
headfile = os.path.join(modelpath, modelname + '.hds')
headfileobj = bf.HeadFile(headfile)
#Get a list of times that are contained in the model
times = headfileobj.get_times()
print 'Headfile (' + modelname + '.hds' + ') contains the following list of times: ', times
#Get a numpy array of heads for totim = 1.0
#The get_data method will extract head data from the binary file.
HEAD = headfileobj.get_data(totim=1.0)
#Print statistics on the head
print 'Head statistics'
print ' min: ', HEAD.min()
print ' max: ', HEAD.max()
print ' std: ', HEAD.std()
#-999.99 is the Inactive node flag so we'll use our previous contour settings
#Create a contour plot of heads
FIG = plt.figure(figsize=(15,13))
#setup contour levels and plot extent
LEVELS = np.arange(1989., 2005., 0.5)
EXTENT = (DELR/2., LX - DELR/2., DELC/2., LY - DELC/2.)
print 'Contour Levels: ', LEVELS
print 'Extent of domain: ', EXTENT
#Make a contour plot on the first axis
AX1 = FIG.add_subplot(1, 2, 1, aspect='equal')
AX1.set_xlabel("x")
AX1.set_ylabel("y")
YTICKS = np.arange(0, 11000, 500)
AX1.set_yticks(YTICKS)
AX1.set_title("Hubbertville contour map")
AX1.text(2000, 10500, r"River", fontsize=10, color="blue")
AX1.text(1800, 340, r"Green Swamp", fontsize=10, color="green")
AX1.contour(np.flipud(HEAD[0, :, :]), levels=LEVELS, extent=EXTENT)
#Make a color flood on the second axis
AX2 = FIG.add_subplot(1, 2, 2, aspect='equal')
AX2.set_xlabel("x")
AX2.set_ylabel("y")
AX2.set_yticks(YTICKS)
AX2.set_title("Hubbertville color flood")
AX2.text(2000, 10500, r"River", fontsize=10, color="black")
AX2.text(1800, 340, r"Green Swamp", fontsize=10, color="black")
cax = AX2.imshow(HEAD[0, :, :], extent=EXTENT, interpolation='nearest', vmin=1989)
cbar = FIG.colorbar(cax, orientation='vertical', shrink=0.45)
COL = 4
# recall we need to flip because MODFLOW's array does not = Python, so we reverse the order (flip them) and call it
Y = np.flipud(HEAD[0,:,COL])
print Y
#for our cross section create X-coordinates to match with heads
XCOORD = np.arange(0, 11000, 500) + 250
print XCOORD
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(1, 1, 1)
TITLE = 'cross section of head along Column = ({0})'.format(COL)
ax.set_title(TITLE)
ax.set_xlabel('y')
ax.set_ylabel('head')
ax.set_xlim(0, 11000.)
ax.set_ylim(1980.,2020.)
ax.text(10480, 1998, r"River", fontsize=10, color="blue",rotation='vertical')
ax.text(300, 1998, r"Green Swamp", fontsize=10, color="green",rotation='vertical')
ax.text(5400,2006., r"Groundwater Divide", fontsize=10, color="black",rotation='vertical')
ax.plot(XCOORD, Y)
# let's use the first model of Part a so that the starting head of 1000 m is not below the bottom of the aquifer
TOP = 1030.
BOT = 980.
#we'll set the cell in the first row and first column to a specified head (changing it's IBOUND value to -1)
IBOUND[:, 0, 0] = -1
print IBOUND
#change the starting head to equal 1000 m
STRT = 1000 * np.ones((NLAY, NROW, NCOL), dtype=np.float32)
print STRT
#we have to update MODFLOW's BAS Package with the new IBOUND and STRT heads
BAS_PACKAGE = flopy.modflow.ModflowBas(MF, ibound=IBOUND, strt=STRT)
print 'aquifer top =', TOP
print 'aquifer bottom =', BOT
#we have to update MODFLOW's DIS Package for the new tops and bottoms
DIS_PACKAGE = flopy.modflow.ModflowDis(MF, NLAY, NROW, NCOL, delr=DELR, delc=DELC,
top=TOP, botm=BOT, laycbd=0)
#delete old files to prevent us from reading old results
modelfiles = os.listdir(modelpath)
for filename in modelfiles:
f = os.path.join(modelpath, filename)
if modelname in f:
try:
os.remove(f)
print 'Deleted: ', filename
except:
print 'Unable to delete: ', filename
#Now write the model input files
MF.write_input()
print "New MODFLOW input files = ", modelfiles
print "You can check the newly created files in", modelpath
#rerun MODFLOW-2005
silent = False #Print model output to screen?
pause = False #Require user to hit enter? Doesn't mean much in Ipython notebook
report = True #Store the output from the model in buff
success, buff = MF.run_model(silent=silent, pause=pause, report=report)
#As before, let's look at the results and compare to P4-3 Part a.
#imports for plotting and reading the MODFLOW binary output file
import matplotlib.pyplot as plt
import flopy.utils.binaryfile as bf
#Create the headfile object and grab the results for last time.
headfile = os.path.join(modelpath, modelname + '.hds')
headfileobj = bf.HeadFile(headfile)
#Get a list of times that are contained in the model
times = headfileobj.get_times()
print 'Headfile (' + modelname + '.hds' + ') contains the following list of times: ', times
#Get a numpy array of heads for totim = 1.0
#The get_data method will extract head data from the binary file.
HEAD = headfileobj.get_data(totim=1.0)
#Print statistics on the head
print 'Head statistics'
print ' min: ', HEAD.min()
print ' max: ', HEAD.max()
print ' std: ', HEAD.std()
#Create a contour plot of heads
FIG = plt.figure(figsize=(15,13))
#setup contour levels and plot extent
LEVELS = np.arange(1000., 1011., 0.5)
EXTENT = (DELR/2., LX - DELR/2., DELC/2., LY - DELC/2.)
#Make a contour plot on the first axis
AX1 = FIG.add_subplot(1, 2, 1, aspect='equal')
AX1.set_xlabel("x")
AX1.set_ylabel("y")
YTICKS = np.arange(0, 11000, 500)
AX1.set_yticks(YTICKS)
AX1.set_title("Hubbertville contour map")
AX1.text(2000, 10500, r"River", fontsize=10, color="blue")
AX1.text(1800, 340, r"Green Swamp", fontsize=10, color="green")
AX1.contour(np.flipud(HEAD[0, :, :]), levels=LEVELS, extent=EXTENT)
#Make a color flood on the second axis
AX2 = FIG.add_subplot(1, 2, 2, aspect='equal')
AX2.set_xlabel("x")
AX2.set_ylabel("y")
AX2.set_yticks(YTICKS)
AX2.set_title("Hubbertville color flood")
AX2.text(2000, 10500, r"River", fontsize=10, color="black")
AX2.text(1800, 340, r"Green Swamp", fontsize=10, color="black")
cax = AX2.imshow(HEAD[0, :, :], extent=EXTENT, interpolation='nearest', vmin=998.2)
cbar = FIG.colorbar(cax, orientation='vertical', shrink=0.45)
#as before let's plot a north-south cross section
COL = 4
# recall we need to flip because MODFLOW's array does not = Python, so we reverse the order (flip them) and call it
Y = np.flipud(HEAD[0,:,COL])
#for our cross section create X-coordinates to match with heads
XCOORD = np.arange(0, 11000, 500) + 250
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(1, 1, 1)
TITLE = 'cross section of head along Column = ({0})'.format(COL)
ax.set_title(TITLE)
ax.set_xlabel('y')
ax.set_ylabel('head')
ax.set_xlim(0, 11000.)
ax.set_ylim(980.,1020.)
ax.text(10480, 998, r"River", fontsize=10, color="blue",rotation='vertical')
ax.text(300, 998, r"Green Swamp", fontsize=10, color="green",rotation='vertical')
ax.text(5400,1007., r"Groundwater Divide", fontsize=10, color="black",rotation='vertical')
ax.plot(XCOORD, Y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup a New Directory and Change Paths
Step2: Define the Model Extent, Grid Resolution, and Characteristics
Step3: Create the MODFLOW Model Object
Step4: Discretization Package
Step5: Basic Package
Step6: Upstream Weighting Package (like Layer Property Flow Package)
Step7: Well Package
Step8: Output Control
Step9: Newton-Raphson Solver
Step10: Recharge Package
Step11: Writing the MODFLOW Input Files
Step12: Yup. It's that simple, the model datasets are written using a single command (mf.write_input).
Step13: Running the Model
Step14: Post Processing the Results
Step15: Look at the bottom of the MODFLOW output file (ending with a *.list) and note the water balance reported.
Step16: Now Change Starting Heads to 2000 m
Step17: How do the two versions of starting head compare? What does this conceptualization need to keep this problem from being a relative result?
|
2,561
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
%matplotlib inline
# include all Shogun classes
from modshogun import *
# generate some ultra easy training data
# NOTE(review): %pylab pulls numpy/matplotlib names (randn, hstack, scatter, ...)
# into the global namespace, which is why they appear unqualified below.
gray()
n=20
title('Toy data for binary classification')
# two 2-D Gaussian blobs: class -1 around the origin, class +1 shifted by (1, 1)
X=hstack((randn(2,n), randn(2,n)+1))
Y=hstack((-ones(n), ones(n)))
_=scatter(X[0], X[1], c=Y , s=100)
p1 = Rectangle((0, 0), 1, 1, fc="w")
p2 = Rectangle((0, 0), 1, 1, fc="k")
legend((p1, p2), ["Class 1", "Class 2"], loc=2)
# training data in Shogun representation
features=RealFeatures(X)
labels=BinaryLabels(Y)
# build two 5-fold splitting strategies for comparison: plain vs. stratified
k=5
normal_split=CrossValidationSplitting(labels, k)
stratified_split=StratifiedCrossValidationSplitting(labels, k)
split_strategies=[stratified_split, normal_split]
#code to visualize splitting
def get_folds(split, num):
    """Collect, for each of the first `num` folds of `split`, the x-coordinates,
    y-coordinates and labels of the samples in that fold (taken from the module
    globals X and Y)."""
    split.build_subsets()
    xs, ys, labs = [], [], []
    for fold in range(num):
        idx = split.generate_subset_indices(fold)
        xs.append([X[0][idx[i]] for i in range(len(idx))])
        ys.append([X[1][idx[i]] for i in range(len(idx))])
        labs.append([Y[idx[i]] for i in range(len(idx))])
    return xs, ys, labs
def plot_folds(split_strategies, num):
    """Plot the first `num` folds of each splitting strategy, one figure per
    strategy and one subplot per fold, to visualize fold class balance."""
    for i in range(len(split_strategies)):
        x, y, lab=get_folds(split_strategies[i], num)
        figure(figsize=(18,4))
        gray()
        suptitle(split_strategies[i].get_name(), fontsize=12)
        for j in range(0, num):
            subplot(1, num, (j+1), title='Fold %s' %(j+1))
            scatter(x[j], y[j], c=lab[j], s=100)
_=plot_folds(split_strategies, 4)
# define SVM with a small rbf kernel (always normalise the kernel!)
C=1
kernel=GaussianKernel(2, 0.001)
kernel.init(features, features)
kernel.set_normalizer(SqrtDiagKernelNormalizer())
classifier=LibSVM(C, kernel, labels)
# train
_=classifier.train()
# instantiate a number of Shogun performance measures
metrics=[ROCEvaluation(), AccuracyMeasure(), ErrorRateMeasure(), F1Measure(), PrecisionMeasure(), RecallMeasure(), SpecificityMeasure()]
# these are TRAINING errors: the classifier is evaluated on the data it was fit on
for metric in metrics:
    print metric.get_name(), metric.evaluate(classifier.apply(features), labels)
metric=AccuracyMeasure()
cross=CrossValidation(classifier, features, labels, stratified_split, metric)
# perform the cross-validation, note that this call involved a lot of computation
result=cross.evaluate()
# the result needs to be casted to CrossValidationResult
result=CrossValidationResult.obtain_from_generic(result)
# this class contains a field "mean" which contains the mean performance metric
print "Testing", metric.get_name(), result.mean
print "Testing", metric.get_name(), [CrossValidationResult.obtain_from_generic(cross.evaluate()).mean for _ in range(10)]
# 25 runs and 95% confidence intervals
cross.set_num_runs(25)
# perform x-validation (now even more expensive)
cross.evaluate()
result=cross.evaluate()
result=CrossValidationResult.obtain_from_generic(result)
print "Testing cross-validation mean %.2f " \
    % (result.mean)
# sweep the Gaussian kernel width and record cross-validated accuracy per width
widths=2**linspace(-5,25,10)
results=zeros(len(widths))
for i in range(len(results)):
    kernel.set_width(widths[i])
    result=CrossValidationResult.obtain_from_generic(cross.evaluate())
    results[i]=result.mean
plot(log2(widths), results, 'blue')
xlabel("log2 Kernel width")
ylabel(metric.get_name())
_=title("Accuracy for different kernel widths")
print "Best Gaussian kernel width %.2f" % widths[results.argmax()], "gives", results.max()
# compare this with a linear kernel
classifier.set_kernel(LinearKernel())
lin_k=CrossValidationResult.obtain_from_generic(cross.evaluate())
plot([log2(widths[0]), log2(widths[len(widths)-1])], [lin_k.mean,lin_k.mean], 'r')
# please excuse this horrible code :)
print "Linear kernel gives", lin_k.mean
_=legend(["Gaussian", "Linear"], loc="lower center")
# Boston Housing regression data; rescale features to a common range first
feats=RealFeatures(CSVFile('../../../data/uci/housing/fm_housing.dat'))
labels=RegressionLabels(CSVFile('../../../data/uci/housing/housing_label.dat'))
preproc=RescaleFeatures()
preproc.init(feats)
feats.add_preprocessor(preproc)
feats.apply_preprocessor(True)
#Regression models
ls=LeastSquaresRegression(feats, labels)
tau=1
rr=LinearRidgeRegression(tau, feats, labels)
width=1
tau=1
kernel=GaussianKernel(feats, feats, width)
kernel.set_normalizer(SqrtDiagKernelNormalizer())
krr=KernelRidgeRegression(tau, kernel, labels)
regression_models=[ls, rr, krr]
n=30
taus = logspace(-4, 1, n)
#5-fold cross-validation
k=5
split=CrossValidationSplitting(labels, k)
metric=MeanSquaredError()
cross=CrossValidation(rr, feats, labels, split, metric)
cross.set_num_runs(50)
# sweep tau for linear ridge regression
errors=[]
for tau in taus:
    #set necessary parameter
    rr.set_tau(tau)
    result=cross.evaluate()
    result=CrossValidationResult.obtain_from_generic(result)
    #Enlist mean error for all runs
    errors.append(result.mean)
figure(figsize=(20,6))
suptitle("Finding best (tau) parameter using cross-validation", fontsize=12)
p=subplot(121)
title("Ridge Regression")
plot(taus, errors, linewidth=3)
p.set_xscale('log')
p.set_ylim([0, 80])
xlabel("Taus")
ylabel("Mean Squared Error")
# repeat the same tau sweep for kernel ridge regression
cross=CrossValidation(krr, feats, labels, split, metric)
cross.set_num_runs(50)
errors=[]
for tau in taus:
    krr.set_tau(tau)
    result=cross.evaluate()
    result=CrossValidationResult.obtain_from_generic(result)
    #print tau, "error", result.mean
    errors.append(result.mean)
p2=subplot(122)
title("Kernel Ridge regression")
plot(taus, errors, linewidth=3)
p2.set_xscale('log')
xlabel("Taus")
_=ylabel("Mean Squared Error")
n=50
widths=logspace(-2, 3, n)
krr.set_tau(0.1)
metric=MeanSquaredError()
k=5
split=CrossValidationSplitting(labels, k)
cross=CrossValidation(krr, feats, labels, split, metric)
cross.set_num_runs(10)
errors=[]
for width in widths:
kernel.set_width(width)
result=cross.evaluate()
result=CrossValidationResult.obtain_from_generic(result)
#print width, "error", result.mean
errors.append(result.mean)
figure(figsize=(15,5))
p=subplot(121)
title("Finding best width using cross-validation")
plot(widths, errors, linewidth=3)
p.set_xscale('log')
xlabel("Widths")
_=ylabel("Mean Squared Error")
n=40
taus = logspace(-3, 0, n)
widths=logspace(-1, 4, n)
cross=CrossValidation(krr, feats, labels, split, metric)
cross.set_num_runs(1)
x, y=meshgrid(taus, widths)
grid=array((ravel(x), ravel(y)))
print grid.shape
errors=[]
for i in range(0, n*n):
krr.set_tau(grid[:,i][0])
kernel.set_width(grid[:,i][1])
result=cross.evaluate()
result=CrossValidationResult.obtain_from_generic(result)
errors.append(result.mean)
errors=array(errors).reshape((n, n))
from mpl_toolkits.mplot3d import Axes3D
#taus = logspace(0.5, 1, n)
jet()
fig=figure(figsize(15,7))
ax=subplot(121)
c=pcolor(x, y, errors)
_=contour(x, y, errors, linewidths=1, colors='black')
_=colorbar(c)
xlabel('Taus')
ylabel('Widths')
ax.set_xscale('log')
ax.set_yscale('log')
ax1=fig.add_subplot(122, projection='3d')
ax1.plot_wireframe(log10(y),log10(x), errors, linewidths=2, alpha=0.6)
ax1.view_init(30,-40)
xlabel('Taus')
ylabel('Widths')
_=ax1.set_zlabel('Error')
#use the best parameters
rr.set_tau(1)
krr.set_tau(0.05)
kernel.set_width(2)
# print a small ASCII table comparing the cross-validated MSE of each model
title_='Performance on Boston Housing dataset'
print "%50s" %title_
for machine in regression_models:
    metric=MeanSquaredError()
    cross=CrossValidation(machine, feats, labels, split, metric)
    cross.set_num_runs(25)
    result=cross.evaluate()
    result=CrossValidationResult.obtain_from_generic(result)
    print "-"*80
    print "|", "%30s" % machine.get_name(),"|", "%20s" %metric.get_name(),"|","%20s" %result.mean ,"|"
print "-"*80
# build the parameter tree for grid-search model selection
#Root
param_tree_root=ModelSelectionParameters()
#Parameter tau
tau=ModelSelectionParameters("tau")
param_tree_root.append_child(tau)
# also R_LINEAR/R_LOG is available as type
min_value=0.01
max_value=1
type_=R_LINEAR
step=0.05
base=2
tau.build_values(min_value, max_value, type_, step, base)
#kernel object
param_gaussian_kernel=ModelSelectionParameters("kernel", kernel)
gaussian_kernel_width=ModelSelectionParameters("log_width")
gaussian_kernel_width.build_values(0.1, 6.0, R_LINEAR, 0.5, 2.0)
#kernel parameter
param_gaussian_kernel.append_child(gaussian_kernel_width)
param_tree_root.append_child(param_gaussian_kernel)
# cross validation instance used
cross_validation=CrossValidation(krr, feats, labels, split, metric)
cross_validation.set_num_runs(1)
# model selection instance
model_selection=GridSearchModelSelection(cross_validation, param_tree_root)
print_state=False
# TODO: enable it once crossval has been fixed
#best_parameters=model_selection.select_model(print_state)
#best_parameters.apply_to_machine(krr)
#best_parameters.print_tree()
result=cross_validation.evaluate()
result=CrossValidationResult.obtain_from_generic(result)
print 'Error with Best parameters:', result.mean
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Types of splitting strategies
Step2: Stratified cross-validation
Step3: Leave One Out cross-validation
Step4: Stratified splitting takes care that each fold has almost the same number of samples from each class. This is not the case with normal splitting which usually leads to imbalanced folds.
Step5: Ok, we now have performed classification on the training data. How good did this work? We can easily do this for many different performance measures.
Step6: Note how for example error rate is 1-accuracy. All of those numbers represent the training error, i.e. the ability of the classifier to explain the given data.
Step7: Now this is incredibly bad compared to the training error. In fact, it is very close to random performance (0.5). The lesson
Step8: It is better to average a number of different runs of cross-validation in this case. A nice side effect of this is that the results can be used to estimate error intervals for a given confidence rate.
Step9: Using this machinery, it is very easy to compare multiple kernel parameters against each other to find the best one. It is even possible to compare a different kernel.
Step10: This gives a brute-force way to select paramters of any algorithm implemented under the CMachine interface. The cool thing about this is, that it is also possible to compare different model families against each other. Below, we compare a a number of regression models in Shogun on the Boston Housing dataset.
Step11: Let us use cross-validation to compare various values of the tau parameter for ridge regression (Regression notebook). We will use MeanSquaredError as the performance metric. Note that normal splitting is used since it might be impossible to generate "good" splits using Stratified splitting in case of regression since we have continuous values for labels.
Step12: A low value of error certifies a good pick for the tau parameter, which should be easy to conclude from the plots. In case of Ridge Regression the value of tau, i.e. the amount of regularization, doesn't seem to matter, but it does seem to in case of Kernel Ridge Regression. One interpretation of this could be the lack of overfitting in the feature space for ridge regression and the occurrence of overfitting in the new kernel space in which Kernel Ridge Regression operates. </br> Next we will compare a range of values for the width of the Gaussian Kernel used in Kernel Ridge Regression
Step13: The values for the kernel parameter and tau may not be independent of each other, so the values we have may not be optimal. A brute force way to do this would be to try all the pairs of these values but it is only feasible for a low number of parameters.
Step14: Let us approximately pick the good parameters using the plots. Now that we have the best parameters, let us compare the various regression models on the data set.
Step15: Model selection using Grid Search
Step16: Next we will create CModelSelectionParameters instance with a kernel object which has to be appended the root node. The kernel object itself will be append with a kernel width parameter which is the parameter we wish to search.
|
2,562
|
<ASSISTANT_TASK:>
Python Code:
# Load image
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Load images
image_bgr = cv2.imread('images/plane_256x256.jpg')
image_gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)
# Number of corners to detect (Shi-Tomasi "good features to track")
corners_to_detect = 10
minimum_quality_score = 0.05
minimum_distance = 25
# Detect corners
corners = cv2.goodFeaturesToTrack(image_gray,
                                  corners_to_detect,
                                  minimum_quality_score,
                                  minimum_distance)
corners = np.float32(corners)
# Draw white circle at each corner.
# cv2.circle needs integer pixel coordinates; the corners were cast to
# float32 above, so cast back explicitly (modern OpenCV raises on floats).
for corner in corners:
    x, y = corner[0]
    cv2.circle(image_bgr, (int(x), int(y)), 10, (255,255,255), -1)
# Convert to grayscale
image_gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)
# Show image
plt.imshow(image_gray, cmap='gray'), plt.axis("off")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load image
Step2: Define Corner Parameters
Step3: Detect Corners
Step4: Mark Corners
Step5: View Image
|
2,563
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score, silhouette_samples
import matplotlib.pyplot as plt
import matplotlib.cm as cm
%matplotlib inline
# processing .csv containing county statistics
counties = pd.read_csv('county_facts.csv')
drop_columns = ["state_abbreviation", "fips"]
counties.drop(drop_columns,inplace=True,axis=1)
# combine it with .csv containing primary statistics
# NOTE(review): concat(axis=1) assumes the rows of `primary` and `counties`
# line up one-to-one by position -- verify against the two CSVs.
primary = pd.read_csv('primary_results.csv')
primary = pd.concat([primary,counties], axis=1)
# keep only the rows for the Donald Trump candidacy
trump = primary[primary['candidate'] == 'Donald Trump'].sort_index()
# drop the features we don't need
drop_columns = ["state_abbreviation", "party", "candidate","area_name"]
trump.drop(drop_columns,inplace=True,axis=1)
# get rid of counties with no statistical data
trump = trump.fillna(0.0)
trump = trump[trump['POP010210'] > 0]
trump.head()
state = trump["state"]
county = trump["county"]
# any of the features in the trump dataframe can be used, these were chosen because they seemed interesting
# each feature is min-max normalized to [0, 1] and reshaped to a column vector
# percent who voted for donald trump
fraction_votes = trump["fraction_votes"]
fraction_votes_norm = np.array((fraction_votes - fraction_votes.min()) / (fraction_votes.max() - fraction_votes.min())).reshape(-1,1)
# median household income of the county
median_income = trump["INC110213"]
median_income_norm = np.array((median_income - median_income.min()) / (median_income.max() - median_income.min())).reshape(-1,1)
# percent of people in the county who were born outside of america
foreign_born = trump["POP645213"]
foreign_born_norm = np.array((foreign_born - foreign_born.min()) / (foreign_born.max() - foreign_born.min())).reshape(-1,1)
# percent of people in the county who graduated high school
high_school = trump["EDU635213"]
high_school_norm = np.array((high_school - high_school.min()) / (high_school.max() - high_school.min())).reshape(-1,1)
# percent of people in the county with a bachelors degree
bachelors = trump["EDU685213"]
bachelors_norm = np.array((bachelors - bachelors.min()) / (bachelors.max() - bachelors.min())).reshape(-1,1)
# the features to be used in k-Means are added to 2-D arrays
trump_norm = np.hstack((high_school_norm, median_income_norm))
# graphs of the normalized data
f, axarr = plt.subplots(2, 2)
axarr[0,0].set_title('Income and Trump Votes')
axarr[0,0].scatter(median_income_norm, fraction_votes_norm, c='red')
axarr[0,1].set_title('Foreigners and Trump Votes')
axarr[0,1].scatter(foreign_born_norm, fraction_votes_norm, c='green')
axarr[1,0].set_title('College and Trump Votes')
axarr[1,0].scatter(bachelors_norm, fraction_votes_norm, c='blue')
axarr[1,1].set_title('High School and Trump Votes')
axarr[1,1].scatter(high_school_norm, fraction_votes_norm, c='yellow')
plt.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False)
plt.setp([a.get_yticklabels() for a in axarr[:, 1]], visible=False)
plt.show()
# pick the number of clusters with the highest average silhouette score
best_nc = 0
best_ss = 0
for n_clusters in range(2,10):
    clusterer = KMeans(n_clusters=n_clusters, random_state=10)
    cluster_labels = clusterer.fit_predict(trump_norm)
    silhouette_avg = silhouette_score(trump_norm, cluster_labels)
    print("For", n_clusters,"clusters, the average silhouette score is", silhouette_avg)
    if silhouette_avg > best_ss:
        best_nc = n_clusters
        best_ss = silhouette_avg
print("The best number of clusters is",best_nc)
# refit k-means with the chosen cluster count
kmeans = KMeans(n_clusters=best_nc, random_state=10)
kmeans.fit(trump_norm)
# mesh step size for the decision-region plot
h = .02
x_min, x_max = trump_norm[:, 0].min() - 1, trump_norm[:, 0].max() + 0.5
y_min, y_max = trump_norm[:, 1].min() - 1, trump_norm[:, 1].max() + 0.5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()),
           cmap=plt.cm.Paired,
           aspect='auto', origin='lower')
plt.plot(trump_norm[:, 0], trump_norm[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
            marker='x', s=100, linewidths=3,
            color='w', zorder=10)
plt.title('K-means Clustering on Primary Results')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Importing the .csv files and processing them into a single dataframe.
Step2: This creates a dataframe containing all of the counties where Trump won. Now the data has to be normalized.
Step3: Graphs showing the relationships between some of these features and the election results are displayed below.
Step4: With the data normalized the k-Means algorithm can be run on it. In order to find the optimal number of clusters the silhouette score was calculated for different numbers.
Step5: With the optimal number of clusters, the most accurate model can be created.
Step6: The results of the k-Means algorithm are plotted below. The code for plotting is taken from the sklearn documentation.
|
2,564
|
<ASSISTANT_TASK:>
Python Code:
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
#reading in an image
#image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
#print('This image is:', type(image), 'with dimensions:', image.shape)
#plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
import math
def grayscale(img):
    """Applies the Grayscale transform.

    This will return an image with only one color channel
    but NOTE: to see the returned image as grayscale
    (assuming your grayscaled image is called 'gray')
    you should call plt.imshow(gray, cmap='gray')
    """
    # Docstring delimiters had been stripped, leaving bare prose as a
    # SyntaxError; restored as a proper triple-quoted docstring.
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Or use BGR2GRAY if you read an image with cv2.imread()
    # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
    """Applies the Canny transform (edge detection) with the given hysteresis thresholds."""
    return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Applies a Gaussian Noise kernel of size (kernel_size, kernel_size)."""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    """Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
    #defining a blank mask to start with
    mask = np.zeros_like(img)
    #defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255
    #filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    #returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
def draw_lines(img, lines, color=[0, 0, 255], thickness=10):
    """
    Draws one extrapolated left and one right lane line onto `img` (in place).

    Hough segments are split by slope ((y2-y1)/(x2-x1)): slopes in
    (-0.8, -0.5) are assigned to the left lane line, slopes in (0.5, 0.8)
    to the right one; near-horizontal and near-vertical segments are ignored
    as noise. A first-degree polynomial is fitted through each group and the
    fitted line is drawn from the bottom edge of the image up to y=320,
    just under the horizon.

    If you want to make the lines semi-transparent, think about combining
    this function with the weighted_img() function below.
    """
    x_right = []
    y_right = []
    x_left = []
    y_left = []
    for line in lines:
        for x1, y1, x2, y2 in line:
            # Guard BEFORE dividing: the original computed the slope first,
            # so a perfectly vertical segment raised ZeroDivisionError.
            if (x2 - x1) == 0:
                continue
            slope = (y2 - y1) / (x2 - x1)
            if slope < -0.5 and slope > -0.8:  # left lane
            # if slope < -0.2 and slope > -0.8:  # use instead for the challenge video (videos 1 & 2 quality drops)
                x_left.extend((x1, x2))
                y_left.extend((y1, y2))
            elif slope > 0.5 and slope < 0.8:  # right lane
            # elif slope > 0.2 and slope < 0.8:  # use instead for the challenge video
                x_right.extend((x1, x2))
                y_right.extend((y1, y2))
    bottom_y = img.shape[0]  # bottom edge (540 for the project videos)
    horizon_y = 320          # just under the horizon
    # left line ----------------------------------------------------------
    if x_left:  # skip when no segment qualified; np.polyfit fails on empty input
        fit_left = np.polyfit(x_left, y_left, 1)
        # invert y = m*x + c to get x for the two target y values
        left_x1 = int(round((bottom_y - fit_left[1]) / fit_left[0]))
        left_x2 = int(round((horizon_y - fit_left[1]) / fit_left[0]))
        cv2.line(img, (left_x1, bottom_y), (left_x2, horizon_y), color, thickness)
    # right line ---------------------------------------------------------
    if x_right:
        fit_right = np.polyfit(x_right, y_right, 1)
        right_x1 = int(round((bottom_y - fit_right[1]) / fit_right[0]))
        right_x2 = int(round((horizon_y - fit_right[1]) / fit_right[0]))
        cv2.line(img, (right_x1, bottom_y), (right_x2, horizon_y), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `img` should be the output of a Canny transform.

    Returns a blank (all black) 3-channel image of the same size with the
    extrapolated lane lines drawn on it (see draw_lines).
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(line_img, lines)
    return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
    """
    `img` is the output of hough_lines(), an image with lines drawn on it
    (a blank, all-black image except for the drawn lines).
    `initial_img` should be the image before any processing.

    The result image is computed as follows:

        initial_img * α + img * β + λ

    NOTE: initial_img and img must be the same shape!
    """
    return cv2.addWeighted(initial_img, α, img, β, λ)
import os
os.listdir("test_images/")
#Pipeline for images
#reading in an image
#image = mpimg.imread('test_images/solidWhiteCurve.jpg')
#image = mpimg.imread('test_images/solidWhiteRight.jpg')
image = mpimg.imread('test_images/solidYellowCurve.jpg')
#image = mpimg.imread('test_images/solidYellowCurve2.jpg')
#image = mpimg.imread('test_images/solidYellowLeft.jpg')
#image = mpimg.imread('test_images/whiteCarLaneSwitch.jpg')
#image = mpimg.imread('test_images/challenge.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
plt.show()
#make it gray and do gaussian and canny
gray = grayscale(image)
blur_gray = gaussian_blur(gray, 3)
edges = canny(blur_gray, 100, 200)
plt.imshow(edges, cmap='gray')
plt.show()
#make a mask and do hough transform
imshape = image.shape
vertices = np.array([[(0, imshape[0]),(430, 320), (500, 320), (imshape[1], imshape[0])]], dtype=np.int32)
masked_edges = region_of_interest(edges, vertices)
plt.imshow(masked_edges, cmap='gray')
plt.show()
hough_image = hough_lines(masked_edges, 3, np.pi/180, 30, 10, 5)
plt.imshow(hough_image, cmap='gray')
plt.show()
weighted_image = weighted_img(hough_image, image, α=0.8, β=1., λ=0.)
#show and save
plt.imshow(weighted_image, cmap='gray')
plt.show()
# Save each intermediate stage under its own file name.
# (Bug fix: hough_image and weighted_image were both written to
# 'result2.jpg', clobbering the blurred-grayscale output -- give every
# stage a distinct name.)
cv2.imwrite('test_images/result1.jpg', gray)
cv2.imwrite('test_images/result2.jpg', blur_gray)
cv2.imwrite('test_images/result3.jpg', edges)
cv2.imwrite('test_images/result4.jpg', masked_edges)
cv2.imwrite('test_images/result5.jpg', hough_image)
cv2.imwrite('test_images/result6.jpg', weighted_image)
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def pipeline(img):
    """Run the full lane-finding chain on a single RGB frame.

    NOTE: the return value must be a 3-channel color image so it can be
    fed straight into the video-processing step below.
    """
    # Edge detection on a smoothed grayscale copy of the frame.
    smoothed = gaussian_blur(grayscale(img), 3)
    edge_map = canny(smoothed, 100, 150)
    # Keep only the trapezoidal region ahead of the car.
    h, w = img.shape[0], img.shape[1]
    roi = np.array([[(0, h), (430, 320), (500, 320), (w, h)]], dtype=np.int32)
    cropped = region_of_interest(edge_map, roi)
    # Hough transform turns the edge pixels into line segments; overlay them.
    lanes = hough_lines(cropped, 3, np.pi / 180, 30, 10, 5)
    return weighted_img(lanes, img, α=0.8, β=1., λ=0.)
def process_image(image):
    """Per-frame callback for moviepy: delegate to the lane-finding pipeline."""
    return pipeline(image)
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
# %time is an IPython magic -- this code only runs inside a notebook.
%time white_clip.write_videofile(white_output, audio=False)
# NOTE(review): the HTML(...) call below appears to have lost the triple
# quotes around its template string; as written these lines are not valid
# Python (HTML("""<video ...>""".format(white_output)) was presumably intended).
HTML(
<video width="960" height="540" controls>
<source src="{0}">
</video>
.format(white_output))
# Same pipeline applied to the solid-yellow-left video.
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML(
<video width="960" height="540" controls>
<source src="{0}">
</video>
.format(yellow_output))
challenge_output = 'test_videos_output/challenge.mp4'
clip3 = VideoFileClip('test_videos/challenge.mp4')
clip_resized = clip3.resize(height=540) # Resize clip to fit current pipeline
clip_resized.write_videofile("test_videos/challenge_resized.mp4", audio=False) #write the resized file to a new file
clip4 = VideoFileClip('test_videos/challenge_resized.mp4') #now read this file and do the math
challenge_clip = clip4.fl_image(process_image)
#It works if in the lines_draw function, the statement for the slope is lowered from 0.5 to 0.2 and from -0.5 to -0.2.
#If then also the max line length from hough transform is lowered to 15, the result is already quite ok
#Due to time constraints i didn't work it out but this could be smoothed with a frame-by-frame comparison
#If the threshold of the lowest slope is too high, the array turns up empty and the polyfit function won't work
#So if in case of an empty array, the previous one would be picked for that frame until a valid array value would be found in
#a next frame, it would be way more stable.
%time challenge_clip.write_videofile(challenge_output, audio=False)
# NOTE(review): as above, the HTML(...) template string appears to have lost
# its triple quotes.
HTML(
<video width="960" height="540" controls>
<source src="{0}">
</video>
.format(challenge_output))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read in an Image
Step9: Ideas for Lane Detection Pipeline
Step10: Test Images
Step11: Build a Lane Finding Pipeline
Step12: Test on Videos
Step13: Let's try the one with the solid white lane on the right first ...
Step15: Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
Step17: Improve the draw_lines() function
Step19: Writeup and Submission
|
2,565
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from quimb.tensor import *
from quimb import *
import numpy as np
# the initial state: a random MPS on n sites with bond dimension chi
n = 50
cyclic = False
chi = 4 # initial bond dimension
psi = MPS_rand_state(n, chi, cyclic=cyclic, tags='KET', dtype='complex128')
# the gates: random two-site unitaries, each carrying a unique tag U0, U1, ...
n_gates = 5 * n
gates = [rand_uni(4) for _ in range(n_gates)]
u_tags = [f'U{i}' for i in range(n_gates)]
for U, t in zip(gates, u_tags):
    # generate a random coordinate (pair of neighbouring sites)
    i = np.random.randint(0, n - int(not cyclic))
    # apply the next gate to the coordinate
    # propagate_tags='sites' (the default in fact) specifies that the
    # new gate tensor should inherit the site tags from tensors it acts on
    psi.gate_(U, where=[i, i + 1], tags=t, propagate_tags='sites')
psi.graph(color=['KET'])
# Pin some tensors to fixed positions to make the graph neater.
fix = {
    # [key - tags that uniquely locate a tensor]: [val - (x, y) coord]
    **{('KET', f'I{i}'): (i, +10) for i in range(n)},
    # can also use an external index, 'k0' etc, as a key to fix it
    **{f'k{i}': (i, -10) for i in range(n)},
}
psi.graph(fix=fix, k=0.001, color=['I5', 'I15', 'I25', 'I35', 'I45'])
# Form the norm <psi|psi> as a tensor network.
psiH = psi.H
psiH.retag_({'KET': 'BRA'}) # specify this to distinguish bra from ket
norm = (psiH | psi)
# Tag the gate tensors ('UGs') and the original state tensors ('VEC0').
norm.add_tag('UGs', where=u_tags, which='any')
norm.add_tag('VEC0', where=u_tags, which='!any')
norm.graph(color=['VEC0', 'UGs'])
# Pin the bra/ket boundary tensors of the norm network for plotting.
fix = {
    **{(f'I{i}', 'KET', 'VEC0'): (i, -20) for i in range(n)},
    **{(f'I{i}', 'BRA', 'VEC0'): (i, +20) for i in range(n)},
}
(psiH | psi).graph(
    color=['VEC0', 'UGs', 'I5', 'I15', 'I25', 'I35', 'I45'],
    node_size=30,
    iterations=500,
    fix=fix, k=0.0001)
# this calculates an optimized path for the contraction, which is cached
# the path can also be inspected with `print(expr)`
expr = (psi.H | psi).contract(all, get='path-info')
%%time
(psi.H | psi) ^ all
# make a 'bra' vector copy with 'upper' indices
psiH = psi.H
psiH.retag_({'KET': 'BRA'})
# this automatically reindexes the TN
psiH.site_ind_id = 'b{}'
# define two subsystems: sysa is kept, sysb is traced out
sysa = range(15, 35)
sysb = [i for i in range(n) if i not in sysa]
# join indices for sysb only, i.e. manually perform the partial trace over sysb
psi.reindex_sites('dummy_ptr{}', sysb, inplace=True)
psiH.reindex_sites('dummy_ptr{}', sysb, inplace=True)
rho_ab = (psiH | psi)
rho_ab
# Pin positions again for the reduced-density-matrix graph.
fix = {
    **{f'k{i}': (i, -10) for i in range(n)},
    **{(f'I{i}', 'KET', 'VEC0'): (i, 0) for i in range(n)},
    **{(f'I{i}', 'BRA', 'VEC0'): (i, 10) for i in range(n)},
    **{f'b{i}': (i, 20) for i in range(n)},
}
rho_ab.graph(color=['VEC0'] + [f'I{i}' for i in sysa], iterations=500, fix=fix, k=0.001)
# View the reduced density matrix as a linear operator over sysa's indices.
right_ix = [f'b{i}' for i in sysa]
left_ix = [f'k{i}' for i in sysa]
rho_ab_lo = rho_ab.aslinearoperator(left_ix, right_ix)
rho_ab_lo
# Stochastic estimate of the subsystem entropy -Tr(rho * log(rho)), R=10 repeats.
S_a = - approx_spectral_function(rho_ab_lo, f=xlogx, verbosity=1, R=10)
S_a
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we specify how many sites we want, how many gates to apply, and some other parameters
Step2: We generate a unique tag for each gate we will apply, which we can also use to address all the gates only.
Step3: To make the graph a bit neater we can supply some fixed positions
Step4: When fixing graphs, it might also be necessary to play with the spring parameter k
Step5: We can see the 'lightcone' effect of adding propagate_tags='sites'.
Step6: Again, it's a bit messy so we can specify some positions for some tensors
Step7: iterations can also be increased if the graph is not relaxing well.
Step8: Later color tags take precedence over earlier ones.
Step9: Manually perform partial trace
Step10: Again we can graph this
Step11: Estimate Subsystem Entropy
Step12: This can be quite slow, so wise to check progress
Step13: Which yields the final entropy (in bits) of the central 20 qubits as
|
2,566
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
import logging
# Silence TensorFlow info/warning output.
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
# Training data: Celsius inputs and matching Fahrenheit targets
# (true relation: F = C * 1.8 + 32).
celsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float)
fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float)
for i,c in enumerate(celsius_q):
    print("{} degrees Celsius = {} degrees Fahrenheit".format(c, fahrenheit_a[i]))
# A single dense layer with one unit: the model learns y = w*x + b.
l0 = tf.keras.layers.Dense(units=1, input_shape=[1])
model = tf.keras.Sequential([l0])
model.compile(loss='mean_squared_error',
              optimizer=tf.keras.optimizers.Adam(0.1))
history = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)
print("Finished training the model")
# Plot the training loss per epoch.
import matplotlib.pyplot as plt
plt.xlabel('Epoch Number')
plt.ylabel("Loss Magnitude")
plt.plot(history.history['loss'])
# Predict 100 C -- the exact answer is 100 * 1.8 + 32 = 212 F.
print(model.predict([100.0]))
# The learned weight/bias should approximate 1.8 and 32.
print("These are the layer variables: {}".format(l0.get_weights()))
# Same task again with three dense layers (4-4-1 units).
l0 = tf.keras.layers.Dense(units=4, input_shape=[1])
l1 = tf.keras.layers.Dense(units=4)
l2 = tf.keras.layers.Dense(units=1)
model = tf.keras.Sequential([l0, l1, l2])
model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(0.1))
model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)
print("Finished training the model")
print(model.predict([100.0]))
print("Model predicts that 100 degrees Celsius is: {} degrees Fahrenheit".format(model.predict([100.0])))
# With hidden layers, the individual weights no longer map onto 1.8 / 32.
print("These are the l0 variables: {}".format(l0.get_weights()))
print("These are the l1 variables: {}".format(l1.get_weights()))
print("These are the l2 variables: {}".format(l2.get_weights()))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Basics
Step2: Set up training data
Step3: Some Machine Learning terminology
Step4: Assemble layers into the model
Step5: Note
Step6: These are used during training (model.fit(), below) to first calculate the loss at each point, and then improve it. In fact, the act of calculating the current loss of a model and then improving it is precisely what training is.
Step7: In later videos, we will go into more detail on what actually happens here and how a Dense layer actually works internally.
Step8: Use the model to predict values
Step9: The correct answer is $100 \times 1.8 + 32 = 212$, so our model is doing really well.
Step10: The first variable is close to ~1.8 and the second to ~32. These values (1.8 and 32) are the actual variables in the real conversion formula.
|
2,567
|
<ASSISTANT_TASK:>
Python Code:
from symbulate import *
%matplotlib inline
# Two fair six-sided dice; X = sum, Y = max of the two rolls.
die = list(range(1, 6 + 1))
P = BoxModel(die, size=2)
X = RV(P, sum)
Y = RV(P, max)
# Tabulate the simulated joint distribution of (X, Y).
die = list(range(1, 6 + 1))
P = BoxModel(die, size=2)
X = RV(P, sum)
Y = RV(P, max)
(X & Y).sim(10000).tabulate(normalize=True)
# Scatter plot of the joint distribution (jitter separates repeated points).
die = list(range(1, 6 + 1))
P = BoxModel(die, size=2)
X = RV(P, sum)
Y = RV(P, max)
(X & Y).sim(10000).plot(jitter = True, alpha = 0.01)
# Bivariate normal: means (0, 1), variances (1, 4), covariance -0.5.
covmatrix = [[1, -0.5],
             [-0.5, 4]]
X, Y = RV(MultivariateNormal(mean = [0, 1], cov = covmatrix)) # see below for notes on "unpacking"
# Independent random variables via a product probability space.
X, Y, Z = RV(Binomial(5, 0.5) * Normal(0, 1) * Uniform(0, 1)) # see below for notes on "unpacking"
(X & Y & Z).sim(10000)
# Equivalent construction: define separately, then assert independence.
X = RV(Binomial(5, 0.5))
Y = RV(Normal(0, 1))
Z = RV(Uniform(0, 1))
X, Y, Z = AssumeIndependent(X, Y, Z)
# i.i.d. pair via the exponentiation shorthand.
X, Y = RV(Normal(0,1) ** 2) # see below for notes on "unpacking"
(X & Y).sim(10000).plot(alpha = 0.01)
# A random vector; components are accessed with brackets (0-indexed).
X = RV(Binomial(5, 0.5) * Normal(0, 1))
X.sim(3)
X = RV(Binomial(5, 0.5) * Normal(0, 1))
X[0].sim(10000).plot()
X = RV(Binomial(5, 0.5) * Normal(0, 1))
X.sim(10000)[1].plot()
# unpacked version
X, Y, Z = RV(Binomial(5, 0.5) * Normal(0,1) * Uniform(0,1))
Y.sim(10000).plot()
(X & Y & Z).sim(4)
# vector version
XYZ = RV(Binomial(5, 0.5) * Normal(0,1) * Uniform(0,1))
X = XYZ[0]
Y = XYZ[1]
Z = XYZ[2]
Y.sim(10000).plot()
XYZ.sim(4)
# Marginal of the third component (Poisson), plus simulated mean / sd.
X = RV(Binomial(5, 0.5) * Normal(0, 1) * Poisson(4))
X[2].sim(10000).plot()
X.sim(10000).mean()
X.sim(10000).sd()
# Simulated means and variances of a bivariate normal pair.
covmatrix = [[1, -0.5],
             [-0.5, 4]]
X, Y = RV(MultivariateNormal(mean = [0, 1], cov = covmatrix))
xy = (X & Y).sim(10000)
xy.mean()
xy.var()
# Min and max of two i.i.d. Uniform(0, 1) draws.
P = Uniform(a=0, b=1) ** 2
X = RV(P, min)
Y = RV(P, max)
xy = (X & Y).sim(10000)
plot(xy, alpha = 0.01)
xy.cov()
# Simulated covariance should be close to the specified -0.5.
covmatrix = [[1, -0.5],
             [-0.5, 4]]
X, Y = RV(MultivariateNormal(mean=[0, 1], cov=covmatrix)) # see below for notes on "unpacking"
xy = (X & Y).sim(10000)
xy.cov()
# With more than two variables, .cov() returns a full covariance matrix.
(X & Y & X+Y).sim(10000).cov()
# Correlation of a bivariate normal pair specified with corr=-0.25.
X, Y = RV(BivariateNormal(mean1=0, mean2=1, sd1=1, sd2=2, corr=-0.25 ))
xy = (X & Y).sim(10000)
xy.corr()
(X & Y & X+Y).sim(10000).corr()
# Sum of independent Poisson variables.
X, Y = RV(Poisson(lam=2.3) * Poisson(lam=1.7))
Z = X + Y
Z.sim(10000).plot()
# Distance from the origin of an i.i.d. standard-normal point (X, Y).
X, Y = RV(Normal(0, 1) ** 2)
Z = sqrt(X ** 2 + Y ** 2)
Z.sim(10000).plot()
# Joint distribution of W = X + Y and Z = X / W for i.i.d. Exponential(1).
X, Y = RV(Exponential(1)**2)
W = X + Y
Z = X / W
(W & Z).sim(10000).plot(alpha = 0.05)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id='joint'></a>
Step2: <a id='ampersand'></a>
Step3: <a id='plot'></a>
Step4: See the section on Symbulate graphics for more details on plotting options and functionality.
Step5: Custom joint distributions can be specified using ProbabilitySpace. For example, it is possible to specify a joint distribution via conditional and marginal distributions.
Step6: The product syntax emphasizes that the random variables are defined on the same probability space (a product space). It is also possible to define each random variable separately and then use the AssumeIndependent command. The following code is equivalent to the above code. Either syntax has the effect of creating an unspecified probability space upon which random variables $X, Y, Z$ are defined via unspecified functions such that $X$, $Y$, and $Z$ are independent and have the specified marginal distributions.
Step7: Random variables are independent and identically distributed (i.i.d.) when they are independent and have a common marginal distribution. For example, if V represents the number of heads in two flips of a penny and W the number of heads in two flips of a dime, then V and W are i.i.d., with a common marginal Binomial(n=2, p=0.5) distribution. For i.i.d. random variables, defining the joint distribution using the "exponentiation" notation ** makes the code a little more compact.
Step8: <a id='rv'></a>
Step9: Components of a random vector X can be accessed using brackets []. Note that Python starts the index at 0, so the first entry of a vector X is X[0], the second entry is X[1], etc. Each component of a random vector is a random variable so indexing using brackets produces a random variable which can be manipulated accordingly.
Step10: Brackets can be used to access components of the random vector itself, or the simulated values of a random vector
Step11: <a id='unpack'></a>
Step12: <a id='marginal'></a>
Step13: Example. A multivariate normal example, with "unpacking".
Step14: <a id='cov'></a>
Step15: Example. A multivariate normal example.
Step16: When simulating more than two random variables, applying .cov() returns the covariance matrix of covariances between each pair of values (with the variances on the diagonal).
Step17: <a id='corr'></a>
Step18: When simulating more than two random variables, applying .corr() returns the correlation matrix of correlations between each pair of values (with 1s on the diagonal since a variable is perfectly correlated with itself).
Step19: <a id='transform'></a>
Step20: Example. The coordinates of a "random point" in the $(x, y)$ plane are random variables $X$ and $Y$ chosen independently of each other, each according to a Normal(0, 1) distribution. Produce a plot of the approximate distribution of $Z$, the distance of the $X, Y$ point from the origin.
Step21: Example. Let $X$ and $Y$ be i.i.d. Exponential(1) random variables. Produce a plot of the approximate joint distribution of $W = X+ Y$ and $Z = X / W$.
|
2,568
|
<ASSISTANT_TASK:>
Python Code:
# NOTE(review): the bare lines "Initialization" and "Print out manually
# selected topics information" below look like markdown / docstring text
# that lost its delimiters -- as written they are not valid Python.
Initialization
'''
Standard modules
'''
import os
import pickle
import sqlite3
import time
from pprint import pprint
'''
Analysis modules
'''
import pandas as pd
'''
Custom modules
'''
import config
import utilities
'''
Misc
'''
nb_name = '20171011-daheng-check_topics_basic_statistics'
Print out manually selected topics information
# Echo each manually selected topic with its 1-based position.
for topic_ind, topic in enumerate(config.MANUALLY_SELECTED_TOPICS_LST):
    print('({}/{}) {}'.format(topic_ind+1, len(config.MANUALLY_SELECTED_TOPICS_LST), topic))
%%time
# NOTE(review): the next three lines appear to be a cell docstring
# ("Register TOPICS_LST_PKL = ... in config.") that lost its quotes;
# DATA_DIR presumably comes from config -- confirm.
Register
TOPICS_LST_PKL = os.path.join(DATA_DIR, 'topics.lst.pkl')
in config.
# Disabled by default; flip the guard to rebuild the topics pickle.
if 0 == 1:
    supplement_topics_lst = []
    '''
Load in pickle for news data over selected period.
'''
    news_period_df = pd.read_pickle(config.NEWS_PERIOD_DF_PKL)
    for topic_ind, topic in enumerate(config.MANUALLY_SELECTED_TOPICS_LST):
        localtime = time.asctime(time.localtime(time.time()))
        print('({}/{}) processing topic: {} ... {}'.format(topic_ind+1,
                                                           len(config.MANUALLY_SELECTED_TOPICS_LST),
                                                           topic['name'],
                                                           localtime))
        '''
Match out associated news titles.
'''
        # Collect the native ids of news items whose title matches the
        # topic's keyword list.
        asso_news_native_ids_lst = []
        for ind, row in news_period_df.iterrows():
            if utilities.news_title_match(row['news_title'], topic['keywords_lst'], verbose=False):
                asso_news_native_ids_lst.append(row['news_native_id'])
        topic['news_native_ids_lst'] = asso_news_native_ids_lst
        '''
Query associated tweets
'''
        # For each matched news item, pull its tweet ids from sqlite.
        asso_tweets_ids_lst = []
        query_news_tweets = '''
select tweet_id from tweets
where news_native_id = :news_native_id
order by tweet_id asc;'''
        with sqlite3.connect(config.NEWS_TWEETS_DB_FILE) as conn:
            cursor = conn.cursor()
            for news_native_id in topic['news_native_ids_lst']:
                cursor.execute(query_news_tweets, {'news_native_id': news_native_id})
                tweets_ids_lst = [item[0] for item in cursor.fetchall()]
                asso_tweets_ids_lst.extend(tweets_ids_lst)
        topic['tweets_ids_lst'] = asso_tweets_ids_lst
        supplement_topics_lst.append(topic)
    '''
Make pickle
'''
    with open(config.TOPICS_LST_PKL, 'wb') as f:
        pickle.dump(supplement_topics_lst, f)
# NOTE(review): "Test recover topics lst pkl" below looks like stripped
# markdown text; not valid Python as written.
Test recover topics lst pkl
# Disabled sanity check: reload the pickle and print per-topic counts.
if 0 == 1:
    with open(config.TOPICS_LST_PKL, 'rb') as f:
        topics_lst = pickle.load(f)
    for topic_ind, topic in enumerate(topics_lst):
        print('{} Topic_name: {}; news_num: {}; tweets_num: {}'.format(topic_ind,
                                                                       topic['name'],
                                                                       len(topic['news_native_ids_lst']),
                                                                       len(topic['tweets_ids_lst'])))
# NOTE(review): the bare lines "Recover pkl" / "Select topic" below look
# like stripped markdown text; not valid Python as written.
Recover pkl
if 1 == 1:
    with open(config.TOPICS_LST_PKL, 'rb') as f:
        topics_lst = pickle.load(f)
Select topic
# Pick one topic by index to inspect.
if 1 == 1:
    target_topic_ind = 26
    topic = topics_lst[target_topic_ind]
'''
Print associated news titles
'''
if 1 == 1:
    print('TOPIC: {}; KEYWORDS: {}'.format(topic['name'], topic['keywords_lst']))
    # limit to first 100 news
    news_native_ids_lst = topic['news_native_ids_lst'][:100]
    query_news = '''
select news_title, news_collected_time from news
where news_native_id = :news_native_id
order by news_native_id asc;'''
    with sqlite3.connect(config.NEWS_TWEETS_DB_FILE) as conn:
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()
        for news_native_id in news_native_ids_lst:
            cursor.execute(query_news, {'news_native_id': news_native_id})
            for row in cursor.fetchall():
                print('{}: {}'.format(row['news_collected_time'], row['news_title']))
'''
Print associated tweets
'''
if 1 == 1:
    print('TOPIC: {}; KEYWORDS: {}'.format(topic['name'], topic['keywords_lst']))
    # limit to first 150 tweets
    tweets_ids_lst = topic['tweets_ids_lst'][:150]
    query_tweets = '''
select tweet_text, tweet_collected_time from tweets
where tweet_id = :tweet_id
order by tweet_native_id asc;'''
    with sqlite3.connect(config.NEWS_TWEETS_DB_FILE) as conn:
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()
        for tweet_id in tweets_ids_lst:
            cursor.execute(query_tweets, {'tweet_id': tweet_id})
            for row in cursor.fetchall():
                print('{}: {}'.format(row['tweet_collected_time'], row['tweet_text']))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Check basic statistics of manually selected topics
Step3: Manually compile a list of topics with keywords
Step5: Check number of associated news and tweets for each topic
Step7: Recover pickle and print number of news and tweets for each topic
Step10: Check news titles and sample tweets of each topic
|
2,569
|
<ASSISTANT_TASK:>
Python Code:
import rebound
# Cartesian setup: any components not passed default to 0.
sim = rebound.Simulation()
sim.add(m=1., x=1., vz = 2.)
# Orbital-element setup: Jacobi coordinates by default, so this body's
# elements are relative to the centre of mass of everything added so far.
sim.add(m=1., a=1.)
sim.status()
# A distant circumbinary planet on a circular orbit ...
sim.add(m=1.e-3, a=100.)
# ... and a test mass referenced explicitly to the second star via `primary`.
sim.add(primary=sim.particles[1], a=0.01)
orbits = sim.calculate_orbits()
for orbit in orbits:
    print(orbit)
# Recompute the last orbit relative to its true primary (particle 1).
print(sim.particles[3].calculate_orbit(sim, primary=sim.particles[1]))
# Circular orbit: omega (argument of pericenter) is ill-defined when e = 0.
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(a=1., e=0., inc=0.1, Omega=0.3, omega=0.1)
orbits = sim.calculate_orbits()
print(orbits[0])
print(orbits[0].theta)
# The true longitude theta = Omega + omega + f is always well defined.
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(a=1., e=0., inc=0.1, Omega=0.3, theta = 0.4)
orbits = sim.calculate_orbits()
print(orbits[0].theta)
# Planar orbit: Omega (line of nodes) is ill-defined when inc = 0.
import rebound
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(a=1., e=0.2, Omega=0.1)
orbits = sim.calculate_orbits()
print(orbits[0])
print(orbits[0].pomega)
# pomega = Omega + omega is the well-defined pericenter angle in the plane.
import rebound
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(a=1., e=0.2, pomega=0.1)
orbits = sim.calculate_orbits()
print(orbits[0])
# Phase can be given as mean anomaly M or mean longitude l.
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(a=1., e=0.1, Omega=0.3, M = 0.1)
sim.add(a=1., e=0.1, Omega=0.3, l = 0.4)
orbits = sim.calculate_orbits()
print(orbits[0].l)
print(orbits[1].l)
# With inc exactly zero REBOUND sets Omega = 0, so omega equals pomega.
import rebound
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(a=1., e=0.1, omega=1.)
orbits = sim.calculate_orbits()
print(orbits[0])
import random
import numpy as np
def simulation(par):
    """Round-trip a (log10 e, log10 f) pair through a rebound orbit.

    Builds a zero-mass particle from the decoded eccentricity and true
    anomaly (with a random inclination and node), reads the elements back
    from rebound, and returns the worse of the two relative errors.
    """
    log_e, log_f = par
    ecc = 10 ** log_e
    anom = 10 ** log_f
    sim = rebound.Simulation()
    sim.add(m=1.)
    semi = 1.
    tilt = random.random() * np.pi
    node = random.random() * 2 * np.pi
    sim.add(m=0., a=semi, e=ecc, inc=tilt, Omega=node, f=anom)
    orbit = sim.calculate_orbits()[0]
    if orbit.f < 0:  # avoid wrapping issues
        orbit.f += 2 * np.pi
    return max(np.fabs(orbit.e - ecc) / ecc, np.fabs(orbit.f - anom) / anom)
# Map the round-trip error over a log-spaced grid of (eccentricity, f).
random.seed(1)
N = 100
es = np.linspace(-16.,-1.,N)
fs = np.linspace(-16.,-1.,N)
params = [(e,f) for e in es for f in fs]
pool=rebound.InterruptiblePool()
res = pool.map(simulation, params)
res = np.array(res).reshape(N,N)
res = np.nan_to_num(res)
# Plot the relative error on a logarithmic colour scale.
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.colors import LogNorm
import matplotlib
f,ax = plt.subplots(1,1,figsize=(7,5))
extent=[fs.min(), fs.max(), es.min(), es.max()]
ax.set_xlim(extent[0], extent[1])
ax.set_ylim(extent[2], extent[3])
ax.set_xlabel(r"true anomaly (f)")
ax.set_ylabel(r"eccentricity")
im = ax.imshow(res, norm=LogNorm(), vmax=1., vmin=1.e-16, aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn_r", extent=extent)
cb = plt.colorbar(im, ax=ax)
cb.solids.set_rasterized(True)
cb.set_label("Relative Error")
def simulation(par):
    """Round-trip a (log10 e, log10 theta) pair through a rebound orbit.

    Same experiment as above, but the particle's position is specified via
    the true longitude ``theta`` instead of the true anomaly.
    """
    log_e, log_theta = par
    ecc = 10 ** log_e
    tlon = 10 ** log_theta
    sim = rebound.Simulation()
    sim.add(m=1.)
    semi = 1.
    tilt = random.random() * np.pi
    node = random.random() * 2 * np.pi
    # This draw is never passed to sim.add; it is kept so the random
    # sequence (and hence the figure) matches the original behaviour.
    peri = random.random() * 2 * np.pi
    sim.add(m=0., a=semi, e=ecc, inc=tilt, Omega=node, theta=tlon)
    orbit = sim.calculate_orbits()[0]
    if orbit.theta < 0:
        orbit.theta += 2 * np.pi
    return max(np.fabs(orbit.e - ecc) / ecc, np.fabs(orbit.theta - tlon) / tlon)
# Same error map, but initializing with the true longitude theta.
random.seed(1)
N = 100
es = np.linspace(-16.,-1.,N)
thetas = np.linspace(-16.,-1.,N)
params = [(e,theta) for e in es for theta in thetas]
pool=rebound.InterruptiblePool()
res = pool.map(simulation, params)
res = np.array(res).reshape(N,N)
res = np.nan_to_num(res)
f,ax = plt.subplots(1,1,figsize=(7,5))
extent=[thetas.min(), thetas.max(), es.min(), es.max()]
ax.set_xlim(extent[0], extent[1])
ax.set_ylim(extent[2], extent[3])
ax.set_xlabel(r"true longitude (\theta)")
ax.set_ylabel(r"eccentricity")
im = ax.imshow(res, norm=LogNorm(), vmax=1., vmin=1.e-16, aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn_r", extent=extent)
cb = plt.colorbar(im, ax=ax)
cb.solids.set_rasterized(True)
cb.set_label("Relative Error")
# Hyperbolic orbit: negative semimajor axis with e > 1.
sim.add(a=-0.2, e=1.4)
sim.status()
# Near-parabolic approximation: set pericenter q = |a|(e - 1) via a huge |a|.
sim = rebound.Simulation()
sim.add(m=1.)
q = 0.1
a=-1.e14
e=1.+q/np.fabs(a)
sim.add(a=a, e=e)
print(sim.calculate_orbits()[0])
# Retrograde orbit (inc = pi).
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(a=1.,inc=np.pi,e=0.1, Omega=0., pomega=1.)
print(sim.calculate_orbits()[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Any components not passed automatically default to 0. REBOUND can also accept orbital elements.
Step2: We always have to pass a semimajor axis (to set a length scale), but any other elements are by default set to 0. Notice that our second star has the same vz as the first one due to the default Jacobi elements. Now we could add a distant planet on a circular orbit,
Step3: This planet is set up relative to the binary center of mass (again due to the Jacobi coordinates), which is probably what we want. But imagine we now want to place a test mass in a tight orbit around the second star. If we passed things as above, the orbital elements would be referenced to the binary/outer-planet center of mass. We can override the default by explicitly passing a primary (any instance of the Particle class)
Step4: All simulations are performed in Cartesian elements, so to avoid the overhead, REBOUND does not update particles' orbital elements as the simulation progresses. However, we can always calculate them when required with sim.calculate_orbits(). Note that REBOUND will always output angles in the range $[-\pi,\pi]$, except the inclination which is always in $[0,\pi]$.
Step5: Notice that there is always one less orbit than there are particles, since orbits are only defined between pairs of particles. We see that we got the first two orbits right, but the last one is way off. The reason is that again the REBOUND default is that we always get Jacobi elements. But we initialized the last particle relative to the second star, rather than the center of mass of all the previous particles.
Step6: though we could have simply avoided this problem by adding bodies from the inside out (second star, test mass, first star, circumbinary planet).
Step7: The problem here is that $\omega$ (the angle from the ascending node to pericenter) is ill-defined for a circular orbit, so it's not clear what we mean when we pass it, and we get spurious results (i.e., $\omega = 0$ rather than 0.1, and $f=0.1$ rather than the default 0). Similarly, $f$, the angle from pericenter to the particle's position, is undefined. However, the true longitude $\theta$, the broken angle from the $x$ axis to the ascending node = $\Omega + \omega + f$, and then to the particle's position, is always well defined
Step8: To be clearer and ensure we get the results we expect, we could instead pass theta to specify the longitude we want, e.g.
Step9: Here we have a planar orbit, in which case the line of nodes becomes ill defined, so $\Omega$ is not a good variable, but we pass it anyway! In this case, $\omega$ is also undefined since it is referenced to the ascending node. Here we get that now these two ill-defined variables get flipped. The appropriate variable is pomega ($\varpi = \Omega + \omega$), which is the angle from the $x$ axis to pericenter
Step10: We can specify the pericenter of the orbit with either $\omega$ or $\varpi$
Step11: Note that if the inclination is exactly zero, REBOUND sets $\Omega$ (which is undefined) to 0, so $\omega = \varpi$.
Step12: Accuracy
Step13: We see that the behavior is poor, which is physically due to $f$ becoming poorly defined at low $e$. If instead we initialize the orbits with the true longitude $\theta$ as discussed above, we get much better results
Step14: Hyperbolic & Parabolic Orbits
Step15: Currently there is no support for exactly parabolic orbits, but we can get a close approximation by passing a nearby hyperbolic orbit where we can specify the pericenter = $|a|(e-1)$ with $a$ and $e$. For example, for a 0.1 AU pericenter,
Step16: Retrograde Orbits
|
2,570
|
<ASSISTANT_TASK:>
Python Code:
from sympy import *
from sympy.abc import i
init_printing()
# Symbols for the quadratic least-squares derivation; <.> denotes a
# sample average.
alpha, beta, gamma = symbols(r'\alpha \beta \gamma')
x_ave, y_ave = symbols(r'\langle{x}\rangle \langle{y}\rangle')
x2_ave, xy_ave = symbols(r'\langle{x^{2}}\rangle \langle{xy}\rangle')
x3_ave, x2y_ave = symbols(r'\langle{x^{3}}\rangle \langle{yx^{2}}\rangle')
x4_ave = symbols(r'\langle{x^{4}}\rangle')
# Moment matrix A of the normal equations A @ a = b.
A = Matrix([[1, x_ave, x2_ave],
            [x_ave, x2_ave, x3_ave],
            [x2_ave, x3_ave, x4_ave]])
A
b = Matrix([y_ave, xy_ave, x2y_ave])
b
# Closed-form solution for the coefficient vector (a0, a1, a2).
a_vec = A.inv() * b
a0 = a_vec[0].simplify()
a0
a1 = a_vec[1].simplify()
a1
a2 = a_vec[2].simplify()
a2
# Variable name - I am not using sympy methods as I want them to
# end up as vectorised numpy functions
x_aveP, y_aveP = symbols(r'xA yA')
x2_aveP, xy_aveP = symbols(r'x2A xyA')
x3_aveP, x2y_aveP = symbols(r'x3A x2yA')
x4_aveP = symbols(r'x4A')
to_python_dict = {x_ave: x_aveP, y_ave:y_aveP, x2_ave:x2_aveP, xy_ave:xy_aveP,
                  x3_ave: x3_aveP, x2y_ave:x2y_aveP, x4_ave:x4_aveP}
# Emit the three coefficient expressions as Python source.
print_python(a0.subs(to_python_dict))
print_python(a1.subs(to_python_dict))
print_python(a2.subs(to_python_dict))
def QuadraticCoefficients(x, y):
    """Least-squares fit of y = a0 + a1*x + a2*x**2.

    Parameters
    ----------
    x, y : array_like
        Sample abscissas and ordinates (same length).

    Returns
    -------
    numpy.ndarray
        The fitted coefficients ``[a0, a1, a2]``.

    Improvement: instead of the hand-expanded closed-form inverse (which
    was error-prone and duplicated the same denominator across the
    coefficients), build the 3x3 moment matrix of the normal equations
    ``A @ a = b`` and solve it directly.
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    # Sample moments of x up to fourth order.
    xA = x.mean()
    x2A = np.mean(x ** 2)
    x3A = np.mean(x ** 3)
    x4A = np.mean(x ** 4)
    A = np.array([[1.0, xA, x2A],
                  [xA, x2A, x3A],
                  [x2A, x3A, x4A]])
    b = np.array([y.mean(), np.mean(x * y), np.mean(x ** 2 * y)])
    return np.linalg.solve(A, b)
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
a_tv = np.array([10.0, -2.0, 0.34])
n = 1000
x_data = np.linspace(0, 10, n)
y_data = np.polyval(a_tv[[2,1,0]], x_data) + np.random.normal(0, 0.5, n)
a_bf = QuadraticCoefficients(x_data, y_data)
y_bf = np.polyval(a_bf[[2,1,0]], x_data)
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(x_data, y_data, label="Data")
ax.plot(x_data, y_bf, "-r", label="Best fit", lw=4.0)
ax.set_xlabel("x", size=22)
ax.set_ylabel("y", size=22, rotation='horizontal')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Construct matrix A
Step2: Construct b
Step3: Solve for $\vec{a}$
Step4: The $a_0$ component
Step5: The $a_{1}$ component
Step6: The $a_{2}$ component
Step7: Testing
|
2,571
|
<ASSISTANT_TASK:>
Python Code:
# Initialize a figure with whatever size we need,
# if we don't want the default one
# Create axes with the projection we want,
# for example, Mercator
# And the things we want to draw on the map
# Land
# Oceans
# Coastlines (the color can be changed)
# Borders
# Rivers and lakes
# Finally, we can draw the gridlines, if we want them
# Initialize a figure with whatever size we need,
# if we don't want the default one
# Choose the InterruptedGoodeHomolosine projection
# And the things we want to draw on the map
# Import the axis formatters for latitude and longitude
# Choose the PlateCarree projection
# And the things we want to draw on the map
# Land
# Oceans
# Coastlines (the color can be changed)
# Borders
# Rivers and lakes
# On the axes, select the gridlines and
# enable the option to show labels
# On the gridlines, set the x and y tick formats
# Choose the projection
# Set the centre point and the extent of the map we want to see
# And the things we want to draw on the map
# Importing Natural Earth Feature
# Choose the projection
# Set the centre point and the extent of the map we want to see
# And the things we want to draw on the map
# Until now we used:
# ax.add_feature(cfeature.COASTLINE,
#                edgecolor=(0.3, 0.3, 0.3),
#                facecolor=cfeature.COLORS['land']
#                )
# But now we will first download the feature
# we want to draw:
# And then we will add it to the map with whatever properties we find convenient.
# Read the already-downloaded csv using pandas
# Create a map on which to plot the data:
# Choose the PlateCarree projection
# And draw the coastlines
# Now we can add the data on top of that map with a scatter plot
# preserve
from netCDF4 import Dataset
from netCDF4 import date2index
from datetime import datetime
# preserve
data = Dataset('../data/gistemp250.nc')
# preserve
# Index of the date of interest (15 Jan 2014) within the file's time variable.
timeindex = date2index(datetime(2014, 1, 15),
                       data.variables['time'])
# preserve
# Build the lon/lat grid and pull the temperature-anomaly slice for that date.
lat = data.variables['lat'][:]
lon = data.variables['lon'][:]
lon, lat = np.meshgrid(lon, lat)
temp_anomaly = data.variables['tempanomaly'][timeindex]
# preserve
fig = plt.figure(figsize=(8,4))
# Choose the projection
ax = plt.axes(projection=ccrs.PlateCarree())
# And the things we want to draw on the map
coastline = NaturalEarthFeature(category='physical', name='coastline', scale='50m')
# ax.add_feature(land, color=cfeature.COLORS['land'])
ax.add_feature(coastline, facecolor=cfeature.COLORS['land'], edgecolor='k', alpha=0.5)
# Colour mesh of the anomaly on a diverging red-blue colormap.
ax.pcolormesh(lon, lat, temp_anomaly, cmap='RdBu_r')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: InterruptedGoodeHomolosine
Step2: Puede interesarnos poner etiquetas a los ejes. Podemos utilizar entonces las herramientas dentro de
Step3: Fijando la extensión de nuestra representación
Step4: Como se ve en la figura anterior, la representación que obtenemos es demasiado burda. Esto se debe a que los datos por defecto se encuentran descargados a una escala poco detallada.
Step5: Desde Natural Earth Feature, no sólo podemos descargar características físicas, sino que también podemos acceder a datasets demográficos
Step6: Casi cualquier representación de las que hemos visto anteriormente con matplotlib es posible.
Step7: The file contains many global temperature readings on a variety of dates; we need to select the index of the date we're interested in—in this case, January 15, 2014
Step8: Now we can load the latitude and longitude data, as well as the temperature anomaly for this index
Step9: Finally, we'll use the pcolormesh() method to draw a color mesh of the data.
|
2,572
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pyemu
la = pyemu.Schur("pest.jco",verbose=False,forecasts=[])
la.drop_prior_information()
jco_ord = la.jco.get(la.pst.obs_names,la.pst.adj_par_names)
ord_base = "pest_ord"
jco_ord.to_binary(ord_base + ".jco")
la.pst.write(ord_base+".pst")
pv_names = []
predictions = ["pd_ten", "c_obs10_2"]
for pred in predictions:
pv = jco_ord.extract(pred).T
pv_name = pred + ".vec"
pv.to_ascii(pv_name)
pv_names.append(pv_name)
prior_uncfile = "pest.unc"
la.parcov.to_uncfile(prior_uncfile,covmat_file=None)
post_mat = "post.cov"
post_unc = "post.unc"
args = [ord_base + ".pst","1.0",prior_uncfile,
post_mat,post_unc,"1"]
pd7_in = "predunc7.in"
f = open(pd7_in,'w')
f.write('\n'.join(args)+'\n')
f.close()
out = "pd7.out"
pd7 = os.path.join("i64predunc7.exe")
os.system(pd7 + " <" + pd7_in + " >"+out)
for line in open(out).readlines():
print(line)
post_pd7 = pyemu.Cov.from_ascii(post_mat)
la_ord = pyemu.Schur(jco=ord_base+".jco",predictions=predictions)
post_pyemu = la_ord.posterior_parameter
#post_pyemu = post_pyemu.get(post_pd7.row_names)
delta = (post_pd7 - post_pyemu).x
(post_pd7 - post_pyemu).to_ascii("delta.cov")
print(delta.sum())
print(delta.max(),delta.min())
args = [ord_base + ".pst", "1.0", prior_uncfile, None, "1"]
pd1_in = "predunc1.in"
pd1 = os.path.join("i64predunc1.exe")
pd1_results = {}
for pv_name in pv_names:
args[3] = pv_name
f = open(pd1_in, 'w')
f.write('\n'.join(args) + '\n')
f.close()
out = "predunc1" + pv_name + ".out"
os.system(pd1 + " <" + pd1_in + ">" + out)
f = open(out,'r')
for line in f:
if "pre-cal " in line.lower():
pre_cal = float(line.strip().split()[-2])
elif "post-cal " in line.lower():
post_cal = float(line.strip().split()[-2])
f.close()
pd1_results[pv_name.split('.')[0].lower()] = [pre_cal, post_cal]
pyemu_results = {}
for pname in la_ord.prior_prediction.keys():
pyemu_results[pname] = [np.sqrt(la_ord.prior_prediction[pname]),
np.sqrt(la_ord.posterior_prediction[pname])]
f = open("predunc1_textable.dat",'w')
for pname in pd1_results.keys():
print(pname)
f.write(pname+"&{0:6.5f}&{1:6.5}&{2:6.5f}&{3:6.5f}\\\n"\
.format(pd1_results[pname][0],pyemu_results[pname][0],
pd1_results[pname][1],pyemu_results[pname][1]))
print("prior",pname,pd1_results[pname][0],pyemu_results[pname][0])
print("post",pname,pd1_results[pname][1],pyemu_results[pname][1])
f.close()
f = open("pred_list.dat",'w')
out_files = []
for pv in pv_names:
out_name = pv+".predvar1b.out"
out_files.append(out_name)
f.write(pv+" "+out_name+"\n")
f.close()
args = [ord_base+".pst","1.0","pest.unc","pred_list.dat"]
for i in range(36):
args.append(str(i))
args.append('')
args.append("n") #no for most parameters
args.append("y") #yes for mult
f = open("predvar1b.in", 'w')
f.write('\n'.join(args) + '\n')
f.close()
os.system("predvar1b.exe <predvar1b.in")
pv1b_results = {}
for out_file in out_files:
pred_name = out_file.split('.')[0]
f = open(out_file,'r')
for _ in range(3):
f.readline()
arr = np.loadtxt(f)
pv1b_results[pred_name] = arr
la_ord_errvar = pyemu.ErrVar(jco=ord_base+".jco",
predictions=predictions,
omitted_parameters="mult1",
verbose=False)
df = la_ord_errvar.get_errvar_dataframe(np.arange(36))
df
fig = plt.figure(figsize=(6,6))
max_idx = 15
idx = np.arange(max_idx)
for ipred,pred in enumerate(predictions):
arr = pv1b_results[pred][:max_idx,:]
first = df[("first", pred)][:max_idx]
second = df[("second", pred)][:max_idx]
third = df[("third", pred)][:max_idx]
ax = plt.subplot(len(predictions),1,ipred+1)
#ax.plot(arr[:,1],color='b',dashes=(6,6),lw=4,alpha=0.5)
#ax.plot(first,color='b')
#ax.plot(arr[:,2],color='g',dashes=(6,4),lw=4,alpha=0.5)
#ax.plot(second,color='g')
#ax.plot(arr[:,3],color='r',dashes=(6,4),lw=4,alpha=0.5)
#ax.plot(third,color='r')
ax.scatter(idx,arr[:,1],marker='x',s=40,color='g',
label="PREDVAR1B - first term")
ax.scatter(idx,arr[:,2],marker='x',s=40,color='b',
label="PREDVAR1B - second term")
ax.scatter(idx,arr[:,3],marker='x',s=40,color='r',
label="PREVAR1B - third term")
ax.scatter(idx,first,marker='o',facecolor='none',
s=50,color='g',label='pyEMU - first term')
ax.scatter(idx,second,marker='o',facecolor='none',
s=50,color='b',label="pyEMU - second term")
ax.scatter(idx,third,marker='o',facecolor='none',
s=50,color='r',label="pyEMU - third term")
ax.set_ylabel("forecast variance")
ax.set_title("forecast: " + pred)
if ipred == len(predictions) -1:
ax.legend(loc="lower center",bbox_to_anchor=(0.5,-0.75),
scatterpoints=1,ncol=2)
ax.set_xlabel("singular values")
#break
plt.savefig("predvar1b_ver.eps")
cmd_args = [os.path.join("i64identpar.exe"),ord_base,"5",
"null","null","ident.out","/s"]
cmd_line = ' '.join(cmd_args)+'\n'
print(cmd_line)
print(os.getcwd())
os.system(cmd_line)
identpar_df = pd.read_csv("ident.out",delim_whitespace=True)
la_ord_errvar = pyemu.ErrVar(jco=ord_base+".jco",
predictions=predictions,
verbose=False)
df = la_ord_errvar.get_identifiability_dataframe(5)
df
fig = plt.figure()
ax = plt.subplot(111)
axt = plt.twinx()
ax.plot(identpar_df["identifiability"])
ax.plot(df["ident"].values)
ax.set_xlim(-10,600)
diff = identpar_df["identifiability"].values - df["ident"].values
#print(diff)
axt.plot(diff)
axt.set_ylim(-1,1)
ax.set_xlabel("parmaeter")
ax.set_ylabel("identifiability")
axt.set_ylabel("difference")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: instaniate pyemu object and drop prior info. Then reorder the jacobian and save as binary. This is needed because the pest utilities require strict order between the control file and jacobian
Step2: extract and save the forecast sensitivity vectors
Step3: save the prior parameter covariance matrix as an uncertainty file
Step4: PRECUNC7
Step5: load the posterior matrix written by predunc7
Step6: The cumulative difference between the two posterior matrices
Step7: PREDUNC1
Step8: organize the pyemu results into a structure for comparison
Step9: compare the results
Step10: PREDVAR1b
Step11: now for pyemu
Step12: generate some plots to verify
Step13: Identifiability
Step14: cheap plot to verify
|
2,573
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'niwa', 'ukesm1-0-ll', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
2,574
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from floweaver import *
df1 = pd.read_csv('holiday_data.csv')
dataset = Dataset(df1)
df1
partition_job = Partition.Simple('Employment Job', np.unique(df1['Employment Job']))
partition_activity = Partition.Simple('Activity', np.unique(df1['Activity']))
# these statements or the ones above do the same thing
partition_job = dataset.partition('Employment Job')
partition_activity = dataset.partition('Activity')
nodes = {
'Activity': ProcessGroup(['Activity'], partition_activity),
'Job': ProcessGroup(['Employment Job'], partition_job),
}
bundles = [
Bundle('Activity', 'Job'),
]
ordering = [
['Activity'],
['Job'],
]
# These are the same each time, so just write them here once
size_options = dict(width=500, height=400,
margins=dict(left=100, right=100))
sdd = SankeyDefinition(nodes, bundles, ordering)
weave(sdd, dataset, measures='Calories Burnt').to_widget(**size_options)
sdd = SankeyDefinition(nodes, bundles, ordering, flow_partition=partition_job)
weave(sdd, dataset, palette='Set2_8', measures='Calories Burnt').to_widget(**size_options)
weave(sdd, dataset, link_color=QuantitativeScale('Calories Burnt'), measures='Calories Burnt').to_widget(**size_options)
weave(sdd, dataset, measures={'Calories Burnt': 'sum', 'Enjoyment': 'mean'}, link_width='Calories Burnt',
link_color=QuantitativeScale('Enjoyment')).to_widget(**size_options)
weave(sdd, dataset, measures={'Calories Burnt': 'sum', 'Enjoyment': 'mean'}, link_width='Calories Burnt',
link_color=QuantitativeScale('Enjoyment', intensity='Calories Burnt')).to_widget(**size_options)
scale = QuantitativeScale('Enjoyment', palette='Blues_9')
weave(sdd, dataset,
measures={'Calories Burnt': 'sum', 'Enjoyment': 'mean'},
link_width='Calories Burnt',
link_color=scale) \
.to_widget(**size_options)
scale.domain
class MyScale(QuantitativeScale):
def get_palette(self, link):
# Choose colour scheme based on link type (here, Employment Job)
name = 'Greens_9' if link.type == 'Student' else 'Blues_9'
return self.lookup_palette_name(name)
def get_color(self, link, value):
palette = self.get_palette(link)
return palette(0.2 + 0.8*value)
my_scale = MyScale('Enjoyment', palette='Blues_9')
weave(sdd, dataset,
measures={'Calories Burnt': 'sum', 'Enjoyment': 'mean'},
link_width='Calories Burnt',
link_color=my_scale) \
.to_widget(**size_options)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now take a look at the dataset we are using. This is a very insightful [made-up] dataset about how different types of people lose weight while on holiday enjoying themselves.
Step2: We now define the partitions of the data. Rather than listing the categories by hand, we use np.unique to pick out a list of the unique values that occur in the dataset.
Step3: In fact, this is pretty common so there is a built-in function to do this
Step4: We then go on to define the structure of our sankey. We define nodes, bundles and the order. In this case its pretty straightforward
Step5: Now we will plot a Sankey that shows the share of time dedicated to each activity by each type of person.
Step6: We can start using colour by specifying that we want to partition the flows according to type of person. Notice that this time we are using a pre-determined palette.
Step7: Now, if we want to make the colour of the flow to be proportional to a numerical value. Use the hue parameter to set the name of the variable that you want to display in colour. To start off, let's use "value", which is the width of the lines
Step8: It's more interesting to use colour to show a different attribute from the flow table. But because a line in the Sankey diagram is an aggregation of multiple flows in the original data, we need to specify how the new dimension will be aggregated. For example, we'll use the mean of the flows within each Sankey link to set the colour. In this case we will use the colour to show how much each type of person emjoys each activity. We can be interested in either the cumulative enjoyment, or the mean enjoyment
Step9: You can change the colour palette using the palette attribute. The palette names are different from before, because those were categorical (or qualitative) scales, and this is now a sequential scale. The palette names are listed here.
Step10: It is possible to create a colorbar / scale to show the range of intensity values, but it's not currently as easy as it should be. This should be improved in future.
|
2,575
|
<ASSISTANT_TASK:>
Python Code:
class RegExp2NFA:
    """Compile regular expressions into non-deterministic finite automata.

    Regular expressions are nested Python values:
      * 0                   -- the empty language,
      * ''                  -- the empty word (epsilon),
      * a one-character str -- that literal character,
      * ('cat',  r1, r2)    -- concatenation,
      * ('or',   r1, r2)    -- alternative,
      * ('star', r)         -- Kleene closure.

    An NFA is the 5-tuple (States, Sigma, delta, start, Accepting) where
    delta maps (state, character) pairs to sets of successor states and
    the character '' labels epsilon transitions.  Thompson's construction
    is used, so every generated NFA has exactly one accepting state.
    """

    def __init__(self, Sigma):
        self.Sigma = Sigma        # input alphabet of the automata to build
        self.StateCount = 0       # running counter for minting fresh states

    def toNFA(self, r):
        """Recursively translate the regular expression r into an NFA.

        Raises ValueError if r is not a well-formed regular expression.
        """
        if r == 0:
            return self.genEmptyNFA()
        elif r == '':
            return self.genEpsilonNFA()
        elif isinstance(r, str) and len(r) == 1:
            return self.genCharNFA(r)
        elif r[0] == 'cat':
            return self.catenate(self.toNFA(r[1]), self.toNFA(r[2]))
        elif r[0] == 'or':
            return self.disjunction(self.toNFA(r[1]), self.toNFA(r[2]))
        elif r[0] == 'star':
            return self.kleene(self.toNFA(r[1]))
        raise ValueError(f'{r} is not a proper regular expression.')

    def genEmptyNFA(self):
        """NFA accepting the empty language: two states, no transitions."""
        start, accept = self.getNewState(), self.getNewState()
        return {start, accept}, self.Sigma, {}, start, {accept}

    def genEpsilonNFA(self):
        """NFA accepting only the empty word, via a single epsilon move."""
        start, accept = self.getNewState(), self.getNewState()
        return {start, accept}, self.Sigma, {(start, ''): {accept}}, start, {accept}

    def genCharNFA(self, c):
        """NFA accepting exactly the one-character word c."""
        start, accept = self.getNewState(), self.getNewState()
        return {start, accept}, self.Sigma, {(start, c): {accept}}, start, {accept}

    def catenate(self, f1, f2):
        """Join two single-accept NFAs in sequence: L(f1) followed by L(f2).

        The accepting state of f1 gets an epsilon transition to the start
        state of f2.
        """
        states1, Sigma, delta1, start1, accept1 = f1
        states2, Sigma, delta2, start2, accept2 = f2
        bridge, = accept1                 # sole accepting state of f1
        delta = {**delta1, **delta2}      # state sets are disjoint by construction
        delta[bridge, ''] = {start2}
        return states1 | states2, Sigma, delta, start1, accept2

    def disjunction(self, f1, f2):
        """Combine two single-accept NFAs into one accepting L(f1) | L(f2)."""
        states1, Sigma, delta1, start1, accept1 = f1
        states2, Sigma, delta2, start2, accept2 = f2
        end1, = accept1
        end2, = accept2
        start = self.getNewState()
        accept = self.getNewState()
        delta = {**delta1, **delta2}
        delta[start, ''] = {start1, start2}   # branch into either automaton
        delta[end1, ''] = {accept}
        delta[end2, ''] = {accept}
        return {start, accept} | states1 | states2, Sigma, delta, start, {accept}

    def kleene(self, f):
        """Wrap a single-accept NFA so it accepts L(f)* (Kleene closure).

        Note: f's transition table is extended in place, mirroring the
        original construction.
        """
        states, Sigma, delta, innerStart, accept0 = f
        innerEnd, = accept0
        start = self.getNewState()
        accept = self.getNewState()
        delta[start, ''] = {innerStart, accept}   # accept empty word or enter
        delta[innerEnd, ''] = {innerStart, accept}  # loop back or leave
        return {start, accept} | states, Sigma, delta, start, {accept}

    def getNewState(self):
        """Return a fresh state: a number that has not been handed out yet."""
        self.StateCount += 1
        return self.StateCount
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The member function toNFA takes an object self of class RegExp2NFA and a regular expression r and returns a finite state machine
Step2: The <span style="font-variant
Step3: The <span style="font-variant
Step4: For a letter $c \in \Sigma$ the <span style="font-variant
Step5: Given two <span style="font-variant
Step6: Given two <span style="font-variant
Step7: Given an <span style="font-variant
Step8: The function getNewState returns a new number that has not yet been used as a state.
|
2,576
|
<ASSISTANT_TASK:>
Python Code:
# --- Configure the MongoDB connection used by AdaptiveMD ---------------------
from adaptivemd import mongodb
# The database URL is a *class* attribute, i.e. global for the whole session.
mongodb.MongoDBStorage._db_url
mongodb.MongoDBStorage.set_port(27018)
mongodb.MongoDBStorage._db_url
mongodb.MongoDBStorage.set_host('128.219.191.255')
mongodb.MongoDBStorage._db_url
mongodb.MongoDBStorage.set_location('localhost:27017')
mongodb.MongoDBStorage._db_url
from adaptivemd import Project
# Project exposes the same setters as convenience wrappers.
Project.set_dbhost('128.219.186.38')
mongodb.MongoDBStorage._db_url
Project.set_dbport('27018')
mongodb.MongoDBStorage._db_url
Project.set_dburl('mongodb://not-correct:dburl/')
mongodb.MongoDBStorage._db_url
# Reset to a working local database before opening the project below.
Project.set_dblocation('localhost:27017')
mongodb.MongoDBStorage._db_url
Project.list()
# Use this to completely remove the tutorial project from the database.
Project.delete('tutorial')
Project.list()
project = Project('tutorial')
project.list()
# Set up the project on a resource; generated files live under shared_path.
project.initialize({'shared_path': '$HOME/admd'})
project.configurations.one.shared_path
from adaptivemd import File
# A File represents a file anywhere (cluster or local).  .load() pulls the
# file's content into the object so it is stored alongside it in the DB.
pdb_file = File('file://../files/alanine/alanine.pdb')
pdb_file.name = 'initial_pdb'
pdb_file.load()
from adaptivemd.engine.openmm import OpenMMEngine
# Task generator that runs OpenMM MD; .named() sets .name and returns self.
engine = OpenMMEngine(
    pdb_file=pdb_file,
    system_file=File('file://../files/alanine/system.xml').load(),
    integrator_file=File('file://../files/alanine/integrator.xml').load(),
    args='-r --report-interval 1 -p CPU'
).named('openmm')
engine.name
# Output types: full-system trajectory every 10 steps, protein-only every step.
engine.add_output_type('master', 'master.dcd', stride=10)
engine.add_output_type('protein', 'protein.dcd', stride=1, selection='protein')
from adaptivemd.analysis.pyemma import PyEMMAAnalysis
# Task generator that builds an MSM from the 'protein' trajectory output.
modeller = PyEMMAAnalysis(
    engine=engine,
    outtype='protein',
    features={'add_inverse_distances': {'select_Backbone': None}}
).named('pyemma')
#project.generators.add(engine)
#project.generators.add(modeller)
project.generators.add([engine, modeller])
len(project.generators)
# Adding the *same* stored instance again is a no-op (count stays the same).
project.generators.add(engine)
len(project.generators)
trajectory = project.new_trajectory(engine['pdb_file'], 100, engine)
#trajectory = project.new_trajectory(pdb_file, 100, engine)
trajectory
print(trajectory.length)
# Frames can be referenced before any simulation has produced data.
print(trajectory[20].exists)
print(trajectory[20])
print(trajectory[19].exists)
print(trajectory[19])
print(trajectory.extend(100))
print(trajectory.run())
task = trajectory.run()
trajectory.exists
#FIXME#project.queue(trajectory)
# shortcut for project.tasks.add(task)
project.queue(task)
len(project.tasks)
task.state
project.tasks.all.state
# now there are data files & folders associated with the trajectory
project.wait_until(task.is_done)
task.state
# use the 'one' method inherited from bundle to see available methods
# for the worker type, such as 'execute'
#project.workers.one.execute('shutdown')
# but use 'all' method in practice to apply across all members of
# the workers bundle in the typical case, where you have many workers
project.workers.all.execute('shutdown')
project.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To change the port number, use the set_port method of the MongoDBStorage interface class
Step2: Likewise, reset the host address with set_host
Step3: Or the whole location at one time with set_location
Step4: The database URL used by AdaptiveMD is a class attribute of MongoDBStorage, so it is 'global' for a python session. You cannot use multiple databases in a single session. Before loading a Project, this must be set correctly, or else you will get a pymongo connection error.
Step5: A bit more on connecting
Step6: With the Project, the whole URL can also be set at one time if you'd like
Step7: Now for the fun stuff
Step8: Note that if you have trajectories or models saved in a pre-existing folder for a project named tutorial, they are not deleted. They will be overwritten as new data is produced. The new project will iterate through pre-existing trajectory names and overwrite the data as each next tutorial is run. The data must be manually moved if desired, or deleted. Only the MongoDB storage associated with the project has been affected by Project.delete('tutorial').
Step9: Now we have a handle for our project. First thing is to set it up to work on a resource.
Step10: File Objects
Step11: First we define a File object. Instead of just a string, these are used to represent files anywhere, on the cluster or your local application. There are some subclasses or extensions of File that have additional meta information like Trajectory or Frame. The underlying base object of a File is called a Location.
Step12: File, like any complex object in adaptivemd, can have a .name attribute that makes them easier to find later. You can either set the .name property after creation, or use a little helper method .named() to get a one-liner. This function will set .name and return itself.
Step13: The .load() at the end is important. It causes the File object to load the content of the file, and if you save the File object in the database, the actual file is stored with it. This way it can simply be rewritten on the cluster or anywhere else.
Step14: Generator Objects
Step15: A task generator will create tasks that workers use to run simulations. Currently, this means a little python script is created that will excute OpenMM. It requires conda to be added to the PATH variable, or at least openmm to be included in the python installation used by the resource. If you set up your resource correctly, then the task should execute automatically via a worker.
Step16: We have now an OpenMMEngine which uses the previously made pdb File object in the location defined by its shared_path. The same for the OpenMM XML files, along with some args to run using the CPU kernel, etc.
Step17: Next, we need to set the output types we want the engine to generate. We chose a stride of 10 for the master trajectory without selection, and save a second trajectory selecting only protein atoms and native stride.
Step18: The selection must be an mdtraj formatted atom selection string.
Step19: The object that computes an MSM model from existing trajectories that you pass it. It is initialized with a .pdb file that is used to create features between the $c_\alpha$ atoms. This implementation requires a PDB, but in general this is not necessary. It is specific to my PyEMMAAnalysis showcase.
Step20: Again we name it pyemma for later reference.
Step21: Note, that you cannot add the same engine instance twice (or any stored object to its store). If you create a new but equivalent engine, it will be considered different and hence you can store it again.
Step22: Create one initial trajectory
Step23: This says, initial is alanine.pdb run for 100 frames and is named xxxxxxxx. This is the name of a folder in the data directory, where trajectory files will be stored. Multiple atom selections, e.g. protein and all atoms, may be written to create multiple files in this folder. We will refer to these distinct trajectories as the outtypes later.
Step24: and since the length is fixed, we know how many frames there are and can access them
Step25: extend method to elongate the trajectory in an additional task
Step26: run method gives us a task that will do an MD simulation and create the trajectory
Step27: We can ask to extend it, we can save it. We can reference specific frames in it before running a simulation. You could even build a whole set of related simulations this way without running a single frame. This is pretty powerful especially in the context of running asynchronous simulations.
Step28: That's it, just take a trajectory description and turn it into a task that contains the shell commands and needed files, etc. Use the property trajectory.exists so see whether the trajectory object is associated with any data.
Step29: Submit the task to the queue
Step30: That is all we can do from here. To execute the tasks you need to create a worker using the adaptivemdworker command from the shell
Step31: If you are done for now, its also good practice to relieve your workers (and save yourself some compute time charges on HPC resources!). You don't have to, even if you're closing the project's database connection. They are associated with the project and will accept tasks at any point that are entered in.
Step32: The final project.close() will close the DB connection. The daemon outside the notebook would be closed separately.
|
2,577
|
<ASSISTANT_TASK:>
Python Code:
# Pack several integer vectors of unequal length into one sparse matrix.
# Each vector fills the leading entries of its row; the rest stays zero.
import numpy as np
import scipy.sparse as sparse

np.random.seed(10)  # deterministic demo data
max_vector_size = 1000
# Same draw order and sizes as before, so the RNG stream is identical.
vectors = [np.random.randint(100, size=length)
           for length in (900, max_vector_size, 950)]

# lil_matrix supports efficient row-wise assignment while building.
result = sparse.lil_matrix((len(vectors), max_vector_size))
for row, vec in enumerate(vectors):
    result[row, :vec.size] = vec
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,578
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('C:\Anaconda2\envs\dato-env\Lib\site-packages')
import graphlab
def polynomial_sframe(feature, degree):
    """Return an SFrame whose columns are feature**1 .. feature**degree.

    Columns are named 'power_1', 'power_2', ...  Assumes degree >= 1.
    """
    result = graphlab.SFrame()
    result['power_1'] = feature
    # Higher powers only exist for degree > 1; range() is empty otherwise.
    for exponent in range(2, degree + 1):
        result['power_' + str(exponent)] = feature ** exponent
    return result
import matplotlib.pyplot as plt
%matplotlib inline
# King County house sales; sort so plots connect points left-to-right,
# breaking ties in sqft_living by price.
sales = graphlab.SFrame('kc_house_data.gl/')
sales = sales.sort(['sqft_living','price'])
# Effectively unregularized baseline (tiny L2 penalty).
l2_small_penalty = 1e-5
poly15_data = polynomial_sframe(sales['sqft_living'], 15) # use equivalent of `polynomial_sframe`
poly15_features = poly15_data.column_names() # get the name of the features
poly15_data['price'] = sales['price'] # add price to the data since it's the target
model1 = graphlab.linear_regression.create(poly15_data, target = 'price',
                                           features = poly15_features, l2_penalty=l2_small_penalty,
                                           validation_set=None,verbose=False)
model1.get("coefficients")
# Split the data into four roughly equal subsets for the overfitting demo.
(semi_split1, semi_split2) = sales.random_split(.5,seed=0)
(set_1, set_2) = semi_split1.random_split(0.5, seed=0)
(set_3, set_4) = semi_split2.random_split(0.5, seed=0)
def get_poly_model(set_data, l2_penalty):
    """Fit a degree-15 polynomial ridge regression of price on sqft_living.

    Returns the (features + price) SFrame together with the fitted model.
    """
    frame = polynomial_sframe(set_data['sqft_living'], 15)
    feature_names = frame.column_names()  # captured before the target is added
    frame['price'] = set_data['price']
    model = graphlab.linear_regression.create(
        frame, target='price', features=feature_names,
        l2_penalty=l2_penalty,
        validation_set=None, verbose=False)
    return frame, model
def get_coef(set_data, l2_penalty):
    """Return the coefficient SFrame of the fitted degree-15 ridge model."""
    _, model = get_poly_model(set_data, l2_penalty)
    return model.get("coefficients")
def plot_fitted_line(set_data, l2_penalty):
    """Scatter the raw data and overlay the fitted degree-15 curve."""
    frame, model = get_poly_model(set_data, l2_penalty)
    return plt.plot(frame['power_1'], frame['price'], '.',
                    frame['power_1'], model.predict(frame), '-')
# Fit the degree-15 model on each subset with the tiny penalty and inspect
# the power_1 coefficient -- it varies wildly from subset to subset.
set_1_coef = get_coef(set_1, l2_small_penalty)
print set_1_coef[set_1_coef['name'] == 'power_1']
plot_fitted_line(set_1, l2_small_penalty)
set_2_coef = get_coef(set_2, l2_small_penalty)
print set_2_coef[set_2_coef['name'] == 'power_1']
plot_fitted_line(set_2, l2_small_penalty)
set_3_coef = get_coef(set_3, l2_small_penalty)
print set_3_coef[set_3_coef['name'] == 'power_1']
plot_fitted_line(set_3, l2_small_penalty)
set_4_coef = get_coef(set_4, l2_small_penalty)
print set_4_coef[set_4_coef['name'] == 'power_1']
plot_fitted_line(set_4, l2_small_penalty)
# Repeat with strong regularization: the coefficients are now much closer.
l2_new_penalty = 1e5
set_1_coef = get_coef(set_1, l2_new_penalty)
print set_1_coef[set_1_coef['name'] == 'power_1']
plot_fitted_line(set_1, l2_new_penalty)
set_2_coef = get_coef(set_2, l2_new_penalty)
print set_2_coef[set_2_coef['name'] == 'power_1']
plot_fitted_line(set_2, l2_new_penalty)
set_3_coef = get_coef(set_3, l2_new_penalty)
print set_3_coef[set_3_coef['name'] == 'power_1']
plot_fitted_line(set_3, l2_new_penalty)
set_4_coef = get_coef(set_4, l2_new_penalty)
print set_4_coef[set_4_coef['name'] == 'power_1']
plot_fitted_line(set_4, l2_new_penalty)
# Hold out 10% as a test set; shuffle the remainder for cross-validation.
(train_valid, test) = sales.random_split(.9, seed=1)
train_valid_shuffled = graphlab.toolkits.cross_validation.shuffle(train_valid, random_seed=1)
n = len(train_valid_shuffled)
k = 10 # 10-fold cross-validation
# Print the (start, end) row range of each fold.
for i in xrange(k):
    start = (n*i)/k
    end = (n*(i+1))/k-1
    print i, (start, end)
train_valid_shuffled[0:10] # rows 0 to 9
print len(train_valid_shuffled)
# start = (n*i)/k
# end = (n*(i+1))/k-1
# validation4 = train_valid_shuffled[(n*3)/k : (n*(3+1))/k-1] #5818, 7757
validation4 = train_valid_shuffled[5818 : 7757]
print int(round(validation4['price'].mean(), 0))
n = len(train_valid_shuffled)
first_two = train_valid_shuffled[0:2]
last_two = train_valid_shuffled[n-2:n]
print first_two.append(last_two)
# NOTE(review): with these hard-coded boundaries, rows 5817 and 7757 land in
# neither validation4 nor train4 -- confirm against the fold formula above.
first_part = train_valid_shuffled[0:5817]
last_part = train_valid_shuffled[7758:]
train4 = first_part.append(last_part)
print len(train4)
print int(round(train4['price'].mean(), 0))
import numpy as np
def k_fold_cross_validation(k, l2_penalty, data, output_name, features_list):
    """Estimate validation error of a ridge model by k-fold cross-validation.

    Fold i uses rows [n*i/k, n*(i+1)/k) of `data` as the validation set and
    all remaining rows for training.  Returns the residual sum of squares on
    the validation folds, averaged over the k folds.
    """
    rss_sum = 0
    n = len(data)
    for i in xrange(k):
        # Integer fold boundaries; `end` is the last row index of the fold.
        start = (n*i)/k
        end = (n*(i+1))/k-1
        validation_set = data[start:end+1]
        training_set = data[0:start].append(data[end+1:n])
        model = graphlab.linear_regression.create(training_set, target = output_name, features = features_list,
                                                  l2_penalty=l2_penalty,
                                                  validation_set=None,verbose=False)
        predictions = model.predict(validation_set)
        # Fix: use output_name instead of the hard-coded 'price' column so the
        # function actually honours its target-column parameter.
        residuals = validation_set[output_name] - predictions
        rss = sum(residuals * residuals)
        rss_sum += rss
    validation_error = rss_sum / k # average = sum / size or you can use np.mean(list_of_validation_error)
    return validation_error
# 10-fold CV over 13 L2 penalties logarithmically spaced in [10, 10**7].
poly_data = polynomial_sframe(train_valid_shuffled['sqft_living'], 15)
my_features = poly_data.column_names()
poly_data['price'] = train_valid_shuffled['price']
val_err_dict = {}
for l2_penalty in np.logspace(1, 7, num=13):
    val_err = k_fold_cross_validation(10, l2_penalty, poly_data, 'price', my_features)
    print l2_penalty#, val_err
    val_err_dict[l2_penalty] = val_err
print val_err_dict
import pprint
pprint.pprint(val_err_dict)
# Three equivalent ways to locate the penalty with the smallest error.
print min(val_err_dict.items(), key=lambda x: x[1])
min_val = min(val_err_dict.itervalues())
print min_val
print min(val_err_dict, key=val_err_dict.get)
# Tabulate (penalty, error) pairs for plotting.
l2_penalty = graphlab.SArray(val_err_dict.keys())
validation_error = graphlab.SArray(val_err_dict.values())
sf = graphlab.SFrame({'l2_penalty':l2_penalty,'validation_error':validation_error})
print sf
# Plot the l2_penalty values in the x axis and the cross-validation error in the y axis.
# Using plt.xscale('log') will make your plot more intuitive.
plt.plot(sf['l2_penalty'],sf['validation_error'],'k.')
plt.xscale('log')
# Retrain on the full training set using the best penalty found above.
poly_data = polynomial_sframe(train_valid_shuffled['sqft_living'], 15)
features_list = poly_data.column_names()
poly_data['price'] = train_valid_shuffled['price']
l2_penalty_best = 1000.0
model = graphlab.linear_regression.create(poly_data, target='price',
                                          features=features_list,
                                          l2_penalty=l2_penalty_best,
                                          validation_set=None)
# Test-set RSS of the final model.
poly_test = polynomial_sframe(test['sqft_living'], 15)
predictions = model.predict(poly_test)
errors = predictions-test['price']
rss = (errors*errors).sum()
print rss
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Polynomial regression, revisited
Step2: Let's use matplotlib to visualize what a polynomial regression looks like on the house data.
Step3: As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. For houses with identical square footage, we break the tie by their prices.
Step4: Let us revisit the 15th-order polynomial model using the 'sqft_living' input. Generate polynomial features up to degree 15 using polynomial_sframe() and fit a model with these features. When fitting the model, use an L2 penalty of 1e-5
Step5: Note
Step6: QUIZ QUESTION
Step7: Next, fit a 15th degree polynomial on set_1, set_2, set_3, and set_4, using 'sqft_living' to predict prices. Print the weights and make a plot of the resulting model.
Step8: The four curves should differ from one another a lot, as should the coefficients you learned.
Step9: These curves should vary a lot less, now that you applied a high degree of regularization.
Step10: Once the data is shuffled, we divide it into equal segments. Each segment should receive n/k elements, where n is the number of observations in the training set and k is the number of segments. Since the segment 0 starts at index 0 and contains n/k elements, it ends at index (n/k)-1. The segment 1 starts where the segment 0 left off, at index (n/k). With n/k elements, the segment 1 ends at index (n*2/k)-1. Continuing in this fashion, we deduce that the segment i starts at index (n*i/k) and ends at (n*(i+1)/k)-1.
Step11: Let us familiarize ourselves with array slicing with SFrame. To extract a continuous slice from an SFrame, use colon in square brackets. For instance, the following cell extracts rows 0 to 9 of train_valid_shuffled. Notice that the first index (0) is included in the slice but the last index (10) is omitted.
Step12: Now let us extract individual segments with array slicing. Consider the scenario where we group the houses in the train_valid_shuffled dataframe into k=10 segments of roughly equal size, with starting and ending indices computed as above.
Step13: To verify that we have the right elements extracted, run the following cell, which computes the average price of the fourth segment. When rounded to nearest whole number, the average should be $536,234.
Step14: After designating one of the k segments as the validation set, we train a model using the rest of the data. To choose the remainder, we slice (0
Step15: Extract the remainder of the data after excluding fourth segment (segment 3) and assign the subset to train4.
Step16: To verify that we have the right elements extracted, run the following cell, which computes the average price of the data with fourth segment excluded. When rounded to nearest whole number, the average should be $539,450.
Step17: Now we are ready to implement k-fold cross-validation. Write a function that computes k validation errors by designating each of the k segments as the validation set. It accepts as parameters (i) k, (ii) l2_penalty, (iii) dataframe, (iv) name of output column (e.g. price) and (v) list of feature names. The function returns the average validation error using k segments as validation sets.
Step18: Once we have a function to compute the average validation error for a model, we can write a loop to find the model that minimizes the average validation error. Write a loop that does the following
Step19: QUIZ QUESTIONS
Step20: Once you found the best value for the L2 penalty using cross-validation, it is important to retrain a final model on all of the training data using this value of l2_penalty. This way, your final model will be trained on the entire dataset.
Step21: QUIZ QUESTION
|
2,579
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
# Google Cloud Notebook
# On a managed notebook instance pip needs --user; elsewhere it does not.
if os.path.exists("/opt/deeplearning/metadata/env_version"):
    USER_FLAG = "--user"
else:
    USER_FLAG = ""
# Install/upgrade the Vertex AI and Cloud Storage client libraries.
! pip3 install -U google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs
    import IPython
    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]"  # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
    # Get your GCP project id from gcloud
    shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1"  # @param {type: "string"}
from datetime import datetime
# Timestamp suffix used to make resource names unique across runs.
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
    if "google.colab" in sys.modules:
        from google.colab import auth as google_auth
        google_auth.authenticate_user()
    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your GCP
    # account.
    elif not os.getenv("IS_TESTING"):
        %env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]"  # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    # NOTE(review): there is no separator between the project id and "aip-";
    # other versions of this notebook use PROJECT_ID + "-aip-" -- confirm.
    BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
# Create the staging bucket in REGION and verify it is reachable.
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
# Accelerators: default to one NVIDIA K80 for training and CPU-only serving;
# both can be overridden via IS_TESTING_* environment variables.
if os.getenv("IS_TESTING_TRAIN_GPU"):
    TRAIN_GPU, TRAIN_NGPU = (
        aip.AcceleratorType.NVIDIA_TESLA_K80,
        int(os.getenv("IS_TESTING_TRAIN_GPU")),
    )
else:
    TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
# NOTE(review): "DEPOLY" is misspelled (used consistently here, but the
# machine-type override below spells "DEPLOY") -- confirm intended name.
if os.getenv("IS_TESTING_DEPOLY_GPU"):
    DEPLOY_GPU, DEPLOY_NGPU = (
        aip.AcceleratorType.NVIDIA_TESLA_K80,
        int(os.getenv("IS_TESTING_DEPOLY_GPU")),
    )
else:
    DEPLOY_GPU, DEPLOY_NGPU = (None, None)
# Pick prebuilt TF training/serving container images matching the TF version
# and GPU availability.
if os.getenv("IS_TESTING_TF"):
    TF = os.getenv("IS_TESTING_TF")
else:
    TF = "2-1"
if TF[0] == "2":
    if TRAIN_GPU:
        TRAIN_VERSION = "tf-gpu.{}".format(TF)
    else:
        TRAIN_VERSION = "tf-cpu.{}".format(TF)
    if DEPLOY_GPU:
        DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
    else:
        DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
    if TRAIN_GPU:
        TRAIN_VERSION = "tf-gpu.{}".format(TF)
    else:
        TRAIN_VERSION = "tf-cpu.{}".format(TF)
    if DEPLOY_GPU:
        DEPLOY_VERSION = "tf-gpu.{}".format(TF)
    else:
        DEPLOY_VERSION = "tf-cpu.{}".format(TF)
TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
# Machine shapes (n1-standard-4 by default) for training and deployment.
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
    MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
    MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
    MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
    MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
# One factory per Vertex AI service; all share the regional endpoint above.
def create_job_client():
    client = aip.JobServiceClient(client_options=client_options)
    return client
def create_model_client():
    client = aip.ModelServiceClient(client_options=client_options)
    return client
def create_endpoint_client():
    client = aip.EndpointServiceClient(client_options=client_options)
    return client
def create_prediction_client():
    client = aip.PredictionServiceClient(client_options=client_options)
    return client
# Instantiate each client once and keep them in a dict keyed by service name.
clients = {}
clients["job"] = create_job_client()
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
for client in clients.items():
    print(client)
# Machine spec for the training worker pool (GPU config only if requested).
if TRAIN_GPU:
    machine_spec = {
        "machine_type": TRAIN_COMPUTE,
        "accelerator_type": TRAIN_GPU,
        "accelerator_count": TRAIN_NGPU,
    }
else:
    machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0}
DISK_TYPE = "pd-ssd"  # [ pd-ssd, pd-standard]
DISK_SIZE = 200  # GB
disk_spec = {"boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE}
JOB_NAME = "custom_job_" + TIMESTAMP
MODEL_DIR = "{}/{}".format(BUCKET_NAME, JOB_NAME)
# Distribution strategy passed to the trainer: single device unless >1 GPU.
if not TRAIN_NGPU or TRAIN_NGPU < 2:
    TRAIN_STRATEGY = "single"
else:
    TRAIN_STRATEGY = "mirror"
EPOCHS = 20
STEPS = 100
PARAM_FILE = BUCKET_NAME + "/params.txt"
# DIRECT=True: pass --model-dir to the trainer ourselves; otherwise Vertex AI
# supplies the output directory via base_output_directory / AIP_MODEL_DIR.
DIRECT = True
if DIRECT:
    CMDARGS = [
        "--model-dir=" + MODEL_DIR,
        "--epochs=" + str(EPOCHS),
        "--steps=" + str(STEPS),
        "--distribute=" + TRAIN_STRATEGY,
        "--param-file=" + PARAM_FILE,
    ]
else:
    CMDARGS = [
        "--epochs=" + str(EPOCHS),
        "--steps=" + str(STEPS),
        "--distribute=" + TRAIN_STRATEGY,
        "--param-file=" + PARAM_FILE,
    ]
# Single-replica worker pool running our Python package with the args above.
worker_pool_spec = [
    {
        "replica_count": 1,
        "machine_spec": machine_spec,
        "disk_spec": disk_spec,
        "python_package_spec": {
            "executor_image_uri": TRAIN_IMAGE,
            "package_uris": [BUCKET_NAME + "/trainer_boston.tar.gz"],
            "python_module": "trainer.task",
            "args": CMDARGS,
        },
    }
]
if DIRECT:
    job_spec = {"worker_pool_specs": worker_pool_spec}
else:
    job_spec = {
        "worker_pool_specs": worker_pool_spec,
        "base_output_directory": {"output_uri_prefix": MODEL_DIR},
    }
custom_job = {"display_name": JOB_NAME, "job_spec": job_spec}
# Assemble a minimal Python source distribution for the trainer package.
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n    install_requires=[\n\n        'tensorflow_datasets==1.3.0',\n\n    ],\n\n    packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: Boston Housing tabular regression\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for Boston Housing
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import numpy as np
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.001, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=20, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=100, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
parser.add_argument('--param-file', dest='param_file',
default='/tmp/param.txt', type=str,
help='Output file for parameters')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
def make_dataset():
# Scaling Boston Housing data features
def scale(feature):
max = np.max(feature)
feature = (feature / max).astype(np.float)
return feature, max
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data(
path="boston_housing.npz", test_split=0.2, seed=113
)
params = []
for _ in range(13):
x_train[_], max = scale(x_train[_])
x_test[_], _ = scale(x_test[_])
params.append(max)
# store the normalization (max) value for each feature
with tf.io.gfile.GFile(args.param_file, 'w') as f:
f.write(str(params))
return (x_train, y_train), (x_test, y_test)
# Build the Keras model
def build_and_compile_dnn_model():
    """Build a small fully connected regression network for the 13 Boston
    Housing features and compile it with MSE loss and RMSprop.

    The learning rate comes from the module-level ``args`` namespace.
    """
    layers = tf.keras.layers
    dnn = tf.keras.Sequential()
    dnn.add(layers.Dense(128, activation='relu', input_shape=(13,)))
    dnn.add(layers.Dense(128, activation='relu'))
    dnn.add(layers.Dense(1, activation='linear'))
    dnn.compile(
        optimizer=tf.keras.optimizers.RMSprop(learning_rate=args.lr),
        loss='mse')
    return dnn
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
BATCH_SIZE = 16
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
with strategy.scope():
# Creation of dataset, and model building/compiling need to be within
# `strategy.scope()`.
model = build_and_compile_dnn_model()
# Train the model
(x_train, y_train), (x_test, y_test) = make_dataset()
model.fit(x_train, y_train, epochs=args.epochs, batch_size=GLOBAL_BATCH_SIZE)
model.save(args.model_dir)
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_boston.tar.gz
def create_custom_job(custom_job):
    """Submit a CustomJob to Vertex AI and echo its key fields.

    Uses the module-level ``clients`` dict and ``PARENT`` resource path.
    """
    response = clients["job"].create_custom_job(parent=PARENT, custom_job=custom_job)
    for field in ("name", "display_name", "state", "create_time", "update_time"):
        print(f"{field}:", getattr(response, field))
    return response
response = create_custom_job(custom_job)
# The full unique ID for the custom job
job_id = response.name
# The short numeric ID for the custom job
job_short_id = job_id.split("/")[-1]
print(job_id)
def get_custom_job(name, silent=False):
    """Fetch a CustomJob by resource name; print its key fields unless silent.

    Uses the module-level ``clients`` dict.
    """
    response = clients["job"].get_custom_job(name=name)
    if not silent:
        for field in ("name", "display_name", "state", "create_time", "update_time"):
            print(f"{field}:", getattr(response, field))
    return response
response = get_custom_job(job_id)
# Poll the custom job once a minute until it either succeeds or fails.
while True:
    response = get_custom_job(job_id, True)
    if response.state != aip.JobState.JOB_STATE_SUCCEEDED:
        print("Training job has not completed:", response.state)
        model_path_to_deploy = None
        # Only a hard failure breaks the loop; other non-succeeded
        # states (pending/running) keep polling.
        if response.state == aip.JobState.JOB_STATE_FAILED:
            break
    else:
        # Success: resolve where the SavedModel was written.  When the
        # training script did not save directly to MODEL_DIR (DIRECT is
        # False), the model lives in the "model" subdirectory.
        if not DIRECT:
            MODEL_DIR = MODEL_DIR + "/model"
        model_path_to_deploy = MODEL_DIR
        print("Training Time:", response.update_time - response.create_time)
        break
    time.sleep(60)
# model_path_to_deploy is None if the job failed.
print("model_to_deploy:", model_path_to_deploy)
import tensorflow as tf
model = tf.keras.models.load_model(MODEL_DIR)
import numpy as np
from tensorflow.keras.datasets import boston_housing
(_, _), (x_test, y_test) = boston_housing.load_data(
path="boston_housing.npz", test_split=0.2, seed=113
)
def scale(feature):
    """Scale a 1-D feature array to [0, 1] by dividing by its maximum.

    Args:
        feature: array-like of numeric values (maximum assumed positive).

    Returns:
        The scaled array as float32.
    """
    # Renamed from `max` to avoid shadowing the builtin.
    feature_max = np.max(feature)
    feature = (feature / feature_max).astype(np.float32)
    return feature
# Let's save one data item that has not been scaled
x_test_notscaled = x_test[0:1].copy()
for _ in range(13):
x_test[_] = scale(x_test[_])
x_test = x_test.astype(np.float32)
print(x_test.shape, x_test.dtype, y_test.shape)
print("scaled", x_test[0])
print("unscaled", x_test_notscaled)
model.evaluate(x_test, y_test)
# Get the rescaling values.
with tf.io.gfile.GFile(PARAM_FILE, "r") as f:
rescale = f.read()
# Convert string to floating point list
rescale = rescale.replace("[", "").replace("]", "")
rescale = [float(val) for val in rescale.split(",")]
print(rescale)
loaded = tf.saved_model.load(model_path_to_deploy)
serving_input = list(
loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)
IMAGE_URI = DEPLOY_IMAGE
def upload_model(display_name, image_uri, model_uri):
    """Upload a trained model artifact to Vertex AI as a Model resource.

    Args:
        display_name: human-readable name for the Model resource.
        image_uri: serving container image used at prediction time.
        model_uri: Cloud Storage path of the model artifacts.

    Returns:
        The fully qualified resource name of the uploaded Model.

    Uses the module-level ``clients`` dict and ``PARENT`` resource path.
    """
    model = {
        "display_name": display_name,
        "metadata_schema_uri": "",
        "artifact_uri": model_uri,
        "container_spec": {
            "image_uri": image_uri,
            "command": [],
            "args": [],
            "env": [{"name": "env_name", "value": "env_value"}],
            "ports": [{"container_port": 8080}],
            "predict_route": "",
            "health_route": "",
        },
    }
    response = clients["model"].upload_model(parent=PARENT, model=model)
    print("Long running operation:", response.operation.name)
    # upload_model is a long-running operation; block up to 3 minutes.
    upload_model_response = response.result(timeout=180)
    print("upload_model_response")
    print(" model:", upload_model_response.model)
    return upload_model_response.model
model_to_deploy_id = upload_model(
"boston-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy
)
def get_model(name):
    """Retrieve a Vertex AI Model resource by name and print it."""
    print(clients["model"].get_model(name=name))
get_model(model_to_deploy_id)
ENDPOINT_NAME = "boston_endpoint-" + TIMESTAMP
def create_endpoint(display_name):
    """Create a Vertex AI Endpoint, wait up to 5 minutes, and print it.

    Returns the resulting Endpoint resource.
    """
    lro = clients["endpoint"].create_endpoint(
        parent=PARENT, endpoint={"display_name": display_name}
    )
    print("Long running operation:", lro.operation.name)
    result = lro.result(timeout=300)
    print("result")
    for field in ("name", "display_name", "description", "labels",
                  "create_time", "update_time"):
        print(f" {field}:", getattr(result, field))
    return result
result = create_endpoint(ENDPOINT_NAME)
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
MIN_NODES = 1
MAX_NODES = 1
DEPLOYED_NAME = "boston_deployed-" + TIMESTAMP
def deploy_model(
    model, deployed_model_display_name, endpoint, traffic_split=None
):
    """Deploy an uploaded Model resource to an Endpoint.

    Args:
        model: fully qualified Model resource name to deploy.
        deployed_model_display_name: human-readable deployment name.
        endpoint: fully qualified Endpoint resource name.
        traffic_split: optional dict mapping deployed-model id to traffic
            percentage.  Defaults to {"0": 100}, i.e. all traffic to this
            deployment.

    Returns:
        The id of the newly deployed model.

    Uses the module-level DEPLOY_GPU / DEPLOY_NGPU / DEPLOY_COMPUTE and
    MIN_NODES / MAX_NODES constants, plus the "endpoint" client.
    """
    # Avoid a mutable default argument; substitute the standard split here.
    if traffic_split is None:
        traffic_split = {"0": 100}
    if DEPLOY_GPU:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_type": DEPLOY_GPU,
            "accelerator_count": DEPLOY_NGPU,
        }
    else:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_count": 0,
        }
    deployed_model = {
        "model": model,
        "display_name": deployed_model_display_name,
        "dedicated_resources": {
            "min_replica_count": MIN_NODES,
            "max_replica_count": MAX_NODES,
            "machine_spec": machine_spec,
        },
        "disable_container_logging": False,
    }
    response = clients["endpoint"].deploy_model(
        endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split
    )
    print("Long running operation:", response.operation.name)
    # deploy_model is a long-running operation; block until completion.
    result = response.result()
    print("result")
    deployed_model = result.deployed_model
    print(" deployed_model")
    print(" id:", deployed_model.id)
    print(" model:", deployed_model.model)
    print(" display_name:", deployed_model.display_name)
    print(" create_time:", deployed_model.create_time)
    return deployed_model.id
deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)
test_item = x_test[0]
test_label = y_test[0]
print(test_item.shape)
def predict_data(data, endpoint, parameters_dict):
    """Send one instance to a deployed Endpoint and print the predictions.

    Args:
        data: a single example with a .tolist() method (e.g. numpy array).
        endpoint: fully qualified Endpoint resource name.
        parameters_dict: optional prediction parameters; None is parsed to a
            protobuf NullValue by ParseDict.

    Uses the module-level ``serving_input`` (serving signature input name)
    and the "prediction" client.
    """
    parameters = json_format.ParseDict(parameters_dict, Value())
    # The format of each instance should conform to the deployed model's prediction input schema.
    instances_list = [{serving_input: data.tolist()}]
    instances = [json_format.ParseDict(s, Value()) for s in instances_list]
    response = clients["prediction"].predict(
        endpoint=endpoint, instances=instances, parameters=parameters
    )
    print("response")
    print(" deployed_model_id:", response.deployed_model_id)
    predictions = response.predictions
    print("predictions")
    for prediction in predictions:
        print(" prediction:", prediction)
predict_data(test_item, endpoint_id, None)
def undeploy_model(deployed_model_id, endpoint):
    """Undeploy a model from an endpoint, removing it from the traffic split."""
    print(
        clients["endpoint"].undeploy_model(
            endpoint=endpoint,
            deployed_model_id=deployed_model_id,
            traffic_split={},
        )
    )
undeploy_model(deployed_model_id, endpoint_id)
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Vertex constants
Step12: Hardware Accelerators
Step13: Container (Docker) image
Step14: Machine Type
Step15: Tutorial
Step16: Train a model
Step17: Prepare your disk specification
Step18: Define the worker pool specification
Step19: Assemble a job specification
Step20: Examine the training package
Step21: Task.py contents
Step22: Store training script on your Cloud Storage bucket
Step23: Train the model
Step24: Now get the unique identifier for the custom job you created.
Step25: Get information on a custom job
Step26: Deployment
Step27: Load the saved model
Step28: Evaluate the model
Step29: Perform the model evaluation
Step30: Get dataset statistics
Step31: Get the serving function signature
Step32: Upload the model
Step33: Get Model resource information
Step34: Deploy the Model resource
Step35: Now get the unique identifier for the Endpoint resource you created.
Step36: Compute instance scaling
Step37: Deploy Model resource to the Endpoint resource
Step38: Make a online prediction request
Step39: Send the prediction request
Step40: Undeploy the Model resource
Step41: Cleaning up
|
2,580
|
<ASSISTANT_TASK:>
Python Code:
Image(url="http://i.giphy.com/LY1DH1AMbG0tq.gif")
Image(url="http://i.giphy.com/12eayhW3TRPCjS.gif")
# Charger la lib
import pandas as pd
#Afficher l'aide
#pd.read_csv?
data = pd.read_csv('data/train.csv') # Chargement des données.
data.head()
data.tail()
data.shape
data.dtypes
data.describe()
data.head()
data.info()
data.Embarked.value_counts(normalize=True)
Image(url="http://i.giphy.com/I3wsrN9ndx11m.gif")
# Voir le mask des données absente sur une colonne
pd.isnull(data.Embarked)
#Connaitre les lignes ou il y a des données absentes
data[pd.isnull(data.Embarked)] # On affiche notre dataframe avec une condition
data["Embarked"] = data["Embarked"].fillna("S")
data[pd.isnull(data.Embarked)] # Il n'y a plus de données absentes
data.head()
sns.barplot(x='Survived', y="Embarked", data=data)#, order=[1,0])
# Quelle est la survie moyenne selon les valeurs "Embarked"
Embarked_group = data[["Embarked", "Survived"]].groupby(['Embarked'], as_index=False).mean()
Embarked_group
# Graphique
sns.barplot(x='Embarked', y='Survived', data=Embarked_group)
data.Fare.describe()
sns.boxplot(data.Fare)
sns.factorplot(x="Survived", y="Fare",
data=data, kind="box")
# Analyse de la distribution du prix des billets
sns.distplot(data.Fare)
fare_survived = data[data.Survived == 1]
fare_not_survived = data[data.Survived == 0]
fare_not_survived.head()
plt.figure(figsize=(12,5)) # Agrandir le graphique
sns.distplot(fare_survived.Fare, label="Survived") # Survived
sns.distplot(fare_not_survived.Fare, label="Dead") # Dead
plt.legend() # On affiche la légende
data.Sex.value_counts()
sns.countplot(data.Sex)
# Moyenne de survie suivant le sexe du passager
grp_sex = data[["Sex", "Survived"]].groupby(['Sex'],as_index=False).mean()
grp_sex
sns.barplot(x='Sex', y='Survived', data=grp_sex)
sns.countplot(data.Pclass)
# Moyenne de survie suivant la classe du passager
grp_class = data[["Pclass", "Survived"]].groupby(['Pclass'],as_index=False).mean()
sns.barplot(x='Pclass', y='Survived', data=grp_class)
data.info()
sns.boxplot(data.Age)
data[pd.isnull(data.Age)].head()
len(data[pd.isnull(data.Age)]) # Nombre de ligne ou il n'y a pas d'age...
data.Age.value_counts()
data_age_1 = data.copy() # On fait une copie de notre DataFrame original
data_age_1['Age'] = data_age_1['Age'].fillna(24)
# Distribution de l'age de nos données d'origine (Age n'est pas vide)
sns.distplot(data[~pd.isnull(data.Age)]['Age'])
# Distribution de l'age de nos données lorque l'on remplie les données manquantes par la plus présente
sns.distplot(data_age_1.Age)
moyenne_age = data.Age.mean()
moyenne_age
data_age_2 = data.copy() # On fait une copie de notre DataFrame original
data_age_2['Age'] = data_age_2['Age'].fillna(moyenne_age)
sns.distplot(data_age_2.Age)
#### Connaitre la médianne d'age des passagers :
median_age = data.Age.median()
median_age
data_age_3 = data.copy() # On fait une copie de notre DataFrame original
data_age_3['Age'] = data_age_3['Age'].fillna(median_age)
sns.distplot(data_age_3.Age)
# Summary statistics of the passengers' ages in the training data.
mean_age = data["Age"].mean()
std_age = data["Age"].std()
nbr_age_nan = data["Age"].isnull().sum()
# Converted from a Python 2 print statement (a SyntaxError under Python 3,
# which the rest of this file uses) to the print() function.
print("Moyenne est " + str(mean_age) + " avec un écart-type de " + str(std_age) + " et " + str(nbr_age_nan) + " valeurs sont absentes")
# Lib de calcul
import numpy as np
np.random.randint(1, 10, 1)
#(mean - std) & (mean + std)
new_age = np.random.randint(mean_age - std_age, mean_age + std_age, size = nbr_age_nan)
new_age
new_age.mean()
data_age_4 = data.copy() # On fait une copie de notre DataFrame original
# Remplir les valeurs d'age manquantes par notre nouvelle série de données :
data_age_4.loc[pd.isnull(data_age_4['Age']), 'Age'] = new_age
# Simulation d'une nouvelle série d'age
sns.distplot(data_age_4.Age)
# Données d'origine :
sns.distplot(data[~pd.isnull(data.Age)]['Age'])
#Relation entre l'age et la survie des passagers
# On transforme l'age en int
data_age_4['Age'] =data_age_4['Age'].astype('int')
# On prend la moyenne de survie par age
grp_age = data_age_4[["Age", "Survived"]].groupby(['Age'], as_index=False).mean()
plt.figure(figsize=(15,5)) # Agrandir le graphique
sns.barplot(x='Age', y='Survived', data=grp_age)
age_survived = data_age_4[data_age_4.Survived == 1]
age_not_survived = data_age_4[data_age_4.Survived == 0]
plt.figure(figsize=(15,5)) # Agrandir le graphique
sns.kdeplot(age_survived['Age'], label="Survived") # Survived
sns.kdeplot(age_not_survived['Age'], label="Dead") # Dead
plt.legend() # On affiche la légende
sns.lmplot('Age','Survived',hue='Pclass',data=data_age_4)
# Réaction des spé Big data qui veulent up leurs scores sur Kaggle ;)
Image(url="http://i.giphy.com/xTiTnnLkYTDWSOWSHK.gif")
data[['Parch', 'SibSp']].describe()
data['Family'] = data["Parch"] + data["SibSp"]
data.Family.value_counts()
# Est ce que la personne à de la famille (oui ou non) --> Booléen
data['is_Family'] = 0 # On initialise notre nouvelle colonne
data.loc[data['Family'] > 0, 'is_Family'] = 1
data.loc[data['Family'] == 0, 'is_Family'] = 0
data.is_Family.value_counts()
sns.countplot(x='is_Family', data=data)
grp_is_family = data[["is_Family", "Survived"]].groupby(['is_Family'],as_index=False).mean()
sns.barplot(x='is_Family', y='Survived', data=grp_is_family)
sns.factorplot('is_Family', data=data,hue='Sex', kind='count')
sns.factorplot('Pclass', data=data,hue='Sex', kind='count')
fig = sns.FacetGrid(data, row="Sex", col='Pclass')
fig.map(sns.barplot,'is_Family', 'Survived')
fig = sns.FacetGrid(data, row="Sex", col='Pclass')
fig.map(sns.kdeplot,'Age')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Titanic dataset
Step2: Analyse data
Step3: Pour regarder les données
Step4: Signification des colonnes
Step5: 2) Connaitre les type des colonnes
Step6: 3) Connaitre la distribution des nos données
Step7: Uniquement que pour les données numériques
Step8: Embarked
Step9: Il semble qu'il manque quelques valeurs pour Embarked et on déteste les valeurs absentes...
Step10: On sait que la valeurs la plus présente (largement) est "S". On va donc remplir les données vide par "S"
Step11: On regarde si "Embarked" est lié à la survie des passagers
Step12: Fare
Step13: Le prix est il lié à la survie ?
Step14: Sexe des passagers
Step15: Pclass des passagers
Step16: Age des passagers
Step17: Lorsqu'il y a une absence non négligeable de données (pour les données continues), il y a plusieurs possibilités afin de résoudre le problème
Step18: 24 est la valeurs la plus présente
Step19: Connaitre la moyenne d'age des passagers
Step20: Créer une série de données qui respecte la distribution d'origine
Step21: La dernière méthode est la plus respectueuse des données
Step22: La famille
Step23: Analyse générale des données
|
2,581
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as pl
from revrand.basis_functions import RandomRBF, RandomLaplace, RandomCauchy, RandomMatern32, RandomMatern52, \
FastFoodRBF, OrthogonalRBF, FastFoodGM, BasisCat
from revrand import Parameter, Positive
# Style
pl.style.use('ggplot')
pl.rc('font', **{'size': 26})
N = 1000
D = 10 # Kernel dimension
nbases = 1500
lenscale = 1.5
mean = -1.
inrange = 4
# Data
x = np.zeros((N, D))
y = np.tile(np.linspace(-inrange, inrange, N), (D, 1)).T
def dist(power=2):
    """Per-row sum of |x - y|**power using the module-level grids x and y.

    x is all-zeros and y sweeps the input range, so this is the distance of
    each y row from the origin, raised to `power` and summed over dimensions.
    """
    return (np.abs((x - y)**power)).sum(axis=1)
# RBF
def kern_rbf():
    """Squared-exponential (RBF) kernel evaluated with the global lenscale."""
    return np.exp(- dist() / (2 * lenscale**2))
# Cauchy
def kern_cau():
    """Cauchy (rational) kernel."""
    return 1. / (1 + dist() / lenscale**2)
# Laplace
def kern_lap():
    """Laplacian (exponential) kernel; uses the L1 distance (power=1)."""
    return np.exp(- dist(power=1) / lenscale)
# Matern 3/2
def kern_m32():
    """Matérn kernel with smoothness nu = 3/2."""
    dterm = np.sqrt(3) * np.sqrt(dist()) / lenscale
    return (1 + dterm) * np.exp(-dterm)
# Matern 5/2
def kern_m52():
    """Matérn kernel with smoothness nu = 5/2."""
    dterm = np.sqrt(5) * np.sqrt(dist()) / lenscale
    return (1 + dterm + dterm**2 / 3.) * np.exp(-dterm)
def kern_combo():
    """Weighted sum of Laplace and RBF kernels (0.5 * Laplace + 2 * RBF)."""
    return 0.5 * kern_lap() + 2 * kern_rbf()
rbf = RandomRBF(Xdim=D, nbases=nbases)
cau = RandomCauchy(Xdim=D, nbases=nbases)
lap = RandomLaplace(Xdim=D, nbases=nbases)
m32 = RandomMatern32(Xdim=D, nbases=nbases)
m52 = RandomMatern52(Xdim=D, nbases=nbases)
ff_rbf = FastFoodRBF(Xdim=D, nbases=nbases)
or_rbf = OrthogonalRBF(Xdim=D, nbases=nbases)
r_lap = Parameter(0.5, Positive())
r_rbf = Parameter(2., Positive())
combo = RandomLaplace(Xdim=D, nbases=nbases, regularizer=r_lap) + \
RandomRBF(Xdim=D, nbases=nbases, regularizer=r_rbf)
# Get expected kernel evaluations
def radialbasis2kern(basis):
    """Approximate kernel evaluations k(x, y) from a random-feature basis.

    Computes phi(x) * phi(y) weighted by the basis regularizer diagonal,
    which converges to the corresponding kernel as the number of bases
    grows.  A concatenated basis (BasisCat) needs one lengthscale per
    sub-basis.  Uses the module-level x, y and lenscale.
    """
    V, _ = basis.regularizer_diagonal(x)
    l = [lenscale] * len(basis.bases) if isinstance(basis, BasisCat) else [lenscale]
    return (basis.transform(x, *l) * basis.transform(y, *l)).dot(V)
k_rbf = kern_rbf()
b_rbf = radialbasis2kern(rbf)
k_cau = kern_cau()
b_cau = radialbasis2kern(cau)
k_lap = kern_lap()
b_lap = radialbasis2kern(lap)
k_m32 = kern_m32()
b_m32 = radialbasis2kern(m32)
k_m52 = kern_m52()
b_m52 = radialbasis2kern(m52)
f_rbf = radialbasis2kern(ff_rbf)
o_rbf = radialbasis2kern(or_rbf)
k_combo = kern_combo()
f_combo = radialbasis2kern(combo)
distfrom00 = np.sign(y[:, 0]) * np.sqrt(dist(power=2))
def plotkern(k1, k2, k1_label=None, k2_label=None):
    """Plot two kernel evaluations (typically exact vs. basis approximation)
    against the signed distance from the origin (module-level distfrom00).

    Args:
        k1: first kernel values (solid blue line).
        k2: second kernel values (dashed red line).
        k1_label, k2_label: legend labels for the two curves.
    """
    pl.figure(figsize=(15, 10))
    pl.plot(distfrom00, k1, 'b', linewidth=3, alpha=0.5, label=k1_label)
    pl.plot(distfrom00, k2, 'r--', linewidth=3, alpha=0.7, label=k2_label)
    pl.grid(True)
    pl.axis('tight')
    pl.xlabel('$\| x - y \|$')
    pl.ylabel('$k(x - y)$')
    pl.legend()
    pl.show()
plotkern(k_rbf, b_rbf, 'RBF kernel', 'RBF basis')
plotkern(k_cau, b_cau, 'Cauchy kernel', 'Cauchy basis')
plotkern(k_lap, b_lap, 'Laplace kernel', 'Laplace basis')
plotkern(k_m32, b_m32, 'Matern32 kernel', 'Matern32 basis')
plotkern(k_m52, b_m52, 'Matern52 kernel', 'Matern52 basis')
plotkern(k_rbf, f_rbf, 'RBF kernel', 'FastFood RBF basis')
plotkern(k_rbf, o_rbf, 'RBF kernel', 'Orthogonal RBF basis')
plotkern(k_combo, f_combo, 'Combo kernel', 'Combo basis')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Settings
Step2: Kernel functions
Step3: Basis functions
Step4: Evaluate kernels and bases
Step5: Plot the kernel functions
|
2,582
|
<ASSISTANT_TASK:>
Python Code:
# ionization degree alpha calculated from the Henderson-Hasselbalch equation for an ideal system
def ideal_alpha(pH, pK):
    """Ideal ionization degree from the Henderson-Hasselbalch equation."""
    ratio = 10.0 ** (pK - pH)
    return 1.0 / (1.0 + ratio)
import matplotlib.pyplot as plt
import numpy as np
import setuptools
import pint # module for working with units and dimensions
assert setuptools.version.pkg_resources.packaging.specifiers.SpecifierSet('>=0.10.1').contains(pint.__version__), \
f'pint version {pint.__version__} is too old: several numpy operations can cast away the unit'
import espressomd
espressomd.assert_features(['WCA', 'ELECTROSTATICS'])
import espressomd.electrostatics
import espressomd.reaction_ensemble
import espressomd.polymer
from espressomd.interactions import HarmonicBond
ureg = pint.UnitRegistry()
TEMPERATURE = 300 * ureg.kelvin
KT = TEMPERATURE * ureg.boltzmann_constant
WATER_PERMITTIVITY = 80
BJERRUM_LENGTH = ureg.elementary_charge**2 / (4 * ureg.pi * ureg.vacuum_permittivity * WATER_PERMITTIVITY * KT)
ureg.define(f'sim_energy = {TEMPERATURE} * boltzmann_constant')
ureg.define(f'sim_length = 0.5 * {BJERRUM_LENGTH}')
ureg.define(f'sim_charge = 1 * e')
C_ACID = 1e-3 * ureg.molar
C_SALT = 2 * C_ACID
N_ACID = 20
BOX_V = (N_ACID / (ureg.avogadro_constant * C_ACID)).to("sim_length^3")
BOX_L = BOX_V ** (1 / 3)
BOX_L_UNITLESS = BOX_L.to("sim_length").magnitude
N_SALT = int((C_SALT * BOX_V * ureg.avogadro_constant).to('dimensionless'))
C_ACID_UNITLESS = C_ACID.to('mol/L').magnitude
C_SALT_UNITLESS = C_SALT.to('mol/L').magnitude
# acidity constant
pK = 4.88
K = 10**(-pK)
pKw = 14.0 # autoprotolysis constant of water
# variables for pH sampling
NUM_PHS = 15 # number of pH values
OFFSET = 2.0 # range of pH values to be used = pK +/- offset
pHmin = pK - OFFSET # lowest pH value to be used
pHmax = pK + OFFSET # highest pH value to be used
pHs = np.linspace(pHmin, pHmax, NUM_PHS) # list of pH values
# Simulate an interacting system with steric repulsion (Warning: it will be slower than without WCA!)
USE_WCA = False
# Simulate an interacting system with electrostatics (Warning: it will be very slow!)
USE_ELECTROSTATICS = False
if USE_ELECTROSTATICS:
assert USE_WCA, "You can not use electrostatics without a short range repulsive potential. Otherwise oppositely charged particles could come infinitely close."
N_BLOCKS = 16 # number of block to be used in data analysis
DESIRED_BLOCK_SIZE = 10 # desired number of samples per block
PROB_REACTION = 0.5 # probability of accepting the reaction move. This parameter changes the speed of convergence.
# number of reaction samples per each pH value
NUM_SAMPLES = int(N_BLOCKS * DESIRED_BLOCK_SIZE / PROB_REACTION)
# particle types of different species
TYPES = {
"HA": 0,
"A": 1,
"B": 2,
"Na": 3,
"Cl": 4,
}
# particle charges of different species
CHARGES = {
"HA": (0 * ureg.e).to("sim_charge").magnitude,
"A": (-1 * ureg.e).to("sim_charge").magnitude,
"B": (+1 * ureg.e).to("sim_charge").magnitude,
"Na": (+1 * ureg.e).to("sim_charge").magnitude,
"Cl": (-1 * ureg.e).to("sim_charge").magnitude,
}
system = espressomd.System(box_l=[BOX_L_UNITLESS] * 3)
system.time_step = 0.01
system.cell_system.skin = 0.4
np.random.seed(seed=10) # initialize the random number generator in numpy
# we need to define bonds before creating polymers
hb = HarmonicBond(k=30, r_0=1.0)
system.bonded_inter.add(hb)
# create the polymer positions
polymers = espressomd.polymer.linear_polymer_positions(n_polymers=1,
beads_per_chain=N_ACID,
bond_length=0.9, seed=23)
# add the polymer particles composed of ionizable acid groups, initially in the ionized state
for polymer in polymers:
prev_particle = None
for position in polymer:
p = system.part.add(pos=position, type=TYPES["A"], q=CHARGES["A"])
if prev_particle:
p.add_bond((hb, prev_particle))
prev_particle = p
# add the corresponding number of H+ ions
system.part.add(pos=np.random.random((N_ACID, 3)) * BOX_L_UNITLESS,
type=[TYPES["B"]] * N_ACID,
q=[CHARGES["B"]] * N_ACID)
# add salt ion pairs
system.part.add(pos=np.random.random((N_SALT, 3)) * BOX_L_UNITLESS,
type=[TYPES["Na"]] * N_SALT,
q=[CHARGES["Na"]] * N_SALT)
system.part.add(pos=np.random.random((N_SALT, 3)) * BOX_L_UNITLESS,
type=[TYPES["Cl"]] * N_SALT,
q=[CHARGES["Cl"]] * N_SALT)
if USE_WCA:
for type_1, type_2 in ((x, y) for x in TYPES.values() for y in TYPES.values()):
system.non_bonded_inter[type_1, type_2].wca.set_params(epsilon=1.0, sigma=1.0)
# relax the overlaps with steepest descent
system.integrator.set_steepest_descent(f_max=0, gamma=0.1, max_displacement=0.1)
system.integrator.run(20)
system.integrator.set_vv() # to switch back to velocity Verlet
# add thermostat and short integration to let the system relax
system.thermostat.set_langevin(kT=KT.to("sim_energy").magnitude, gamma=1.0, seed=7)
system.integrator.run(steps=1000)
if USE_ELECTROSTATICS:
p3m = espressomd.electrostatics.P3M(
prefactor=(BJERRUM_LENGTH * KT / (ureg.elementary_charge ** 2)
).to("sim_length * sim_energy / sim_charge^2").magnitude,
accuracy=1e-3)
system.actors.add(p3m)
else:
# this speeds up the simulation of dilute systems with small particle numbers
system.cell_system.set_n_square()
# empty numpy array as placeholders for collecting data
num_As_at_each_pH = -np.ones((len(pHs), NUM_SAMPLES)) # number of A- species observed at each sample
# run a productive simulation and collect the data
print(f"Simulated pH values: {pHs}")
for ipH, pH in enumerate(pHs):
print(f"Run pH {pH:.2f} ...")
RE.constant_pH = pH # set new pH value
equilibrate_pH() # pre-equilibrate to the new pH value
perform_sampling(NUM_SAMPLES, num_As_at_each_pH[ipH, :]) # perform sampling/ run production simulation
print(f"measured number of A-: {np.mean(num_As_at_each_pH[ipH]):.2f}, (ideal: {N_ACID*ideal_alpha(pH, pK):.2f})")
# statistical analysis of the results
def block_analyze(input_data, n_blocks=16):
    """Block-average analysis of (possibly correlated) time series.

    Args:
        input_data: 2-D array-like of shape (n_series, n_samples); each row
            is one observable sampled over the simulation.
        n_blocks: number of blocks to split each series into.

    Returns:
        (av_data, err_data, tau_data, block_size): the mean of each series,
        the standard error of the mean estimated from the block averages,
        the estimated autocorrelation time (-1.0 marks a failed estimate),
        and the number of samples per block.
    """
    data = np.asarray(input_data)
    # this number of blocks is recommended by Janke as a reasonable compromise
    # between the conflicting requirements on block size and number of blocks
    # (floor division already yields an int; the original wrapped it in int()
    # and also pre-assigned an unused `block = 0`)
    block_size = data.shape[1] // n_blocks
    print(f"block_size: {block_size}")
    # initialize the array of per-block averages
    block_average = np.zeros((n_blocks, data.shape[0]))
    # calculate averages per each block
    for block in range(n_blocks):
        block_average[block] = np.average(data[:, block * block_size: (block + 1) * block_size], axis=1)
    # calculate the average and average of the square
    av_data = np.average(data, axis=1)
    av2_data = np.average(data * data, axis=1)
    # calculate the variance of the block averages
    block_var = np.var(block_average, axis=0)
    # calculate standard error of the mean
    err_data = np.sqrt(block_var / (n_blocks - 1))
    # estimate autocorrelation time using the formula given by Janke
    # this assumes that the errors have been correctly estimated
    tau_data = np.zeros(av_data.shape)
    for val in range(av_data.shape[0]):
        if av_data[val] == 0:
            # unphysical value marks a failure to compute tau
            tau_data[val] = -1.0
        else:
            tau_data[val] = 0.5 * block_size * n_blocks / (n_blocks - 1) * block_var[val] \
                / (av2_data[val] - av_data[val] * av_data[val])
    return av_data, err_data, tau_data, block_size
# estimate the statistical error and the autocorrelation time using the formula given by Janke
av_num_As, err_num_As, tau, block_size = block_analyze(num_As_at_each_pH, N_BLOCKS)
print(f"av = {av_num_As}")
print(f"err = {err_num_As}")
print(f"tau = {tau}")
# calculate the average ionization degree
av_alpha = av_num_As / N_ACID
err_alpha = err_num_As / N_ACID
# plot the simulation results compared with the ideal titration curve
plt.figure(figsize=(10, 6), dpi=80)
plt.errorbar(pHs - pK, av_alpha, err_alpha, marker='o', linestyle='none',
label=r"simulation")
pHs2 = np.linspace(pHmin, pHmax, num=50)
plt.plot(pHs2 - pK, ideal_alpha(pHs2, pK), label=r"ideal")
plt.xlabel('pH-p$K$', fontsize=16)
plt.ylabel(r'$\alpha$', fontsize=16)
plt.legend(fontsize=16)
plt.show()
# check if the blocks contain enough data for reliable error estimates
print(f"uncorrelated samples per block:\nblock_size/tau = {block_size / tau}")
threshold = 10 # block size should be much greater than the correlation time
if np.any(block_size / tau < threshold):
print(f"\nWarning: some blocks may contain less than {threshold} uncorrelated samples."
"\nYour error estimated may be unreliable."
"\nPlease, check them using a more sophisticated method or run a longer simulation.")
print(f"? block_size/tau > threshold ? : {block_size / tau > threshold}")
else:
print(f"\nAll blocks seem to contain more than {threshold} uncorrelated samples."
"Error estimates should be OK.")
# plot the deviations from the ideal result
plt.figure(figsize=(10, 6), dpi=80)
ylim = np.amax(abs(av_alpha - ideal_alpha(pHs, pK)))
plt.ylim((-1.5 * ylim, 1.5 * ylim))
plt.errorbar(pHs - pK, av_alpha - ideal_alpha(pHs, pK),
err_alpha, marker='o', linestyle='none', label=r"simulation")
plt.plot(pHs - pK, 0.0 * ideal_alpha(pHs, pK), label=r"ideal")
plt.xlabel('pH-p$K$', fontsize=16)
plt.ylabel(r'$\alpha - \alpha_{ideal}$', fontsize=16)
plt.legend(fontsize=16)
plt.show()
# average concentration of B+ is the same as the concentration of A-
av_c_Bplus = av_alpha * C_ACID_UNITLESS
err_c_Bplus = err_alpha * C_ACID_UNITLESS # error in the average concentration
full_pH_range = np.linspace(2, 12, 100)
ideal_c_Aminus = ideal_alpha(full_pH_range, pK) * C_ACID_UNITLESS
ideal_c_OH = np.power(10.0, -(pKw - full_pH_range))
ideal_c_H = np.power(10.0, -full_pH_range)
# ideal_c_M is calculated from electroneutrality
ideal_c_M = np.clip((ideal_c_Aminus + ideal_c_OH - ideal_c_H), 0, np.inf)
# plot the simulation results compared with the ideal results of the cations
plt.figure(figsize=(10, 6), dpi=80)
plt.errorbar(pHs,
av_c_Bplus,
err_c_Bplus,
marker='o', c="tab:blue", linestyle='none',
label=r"measured $c_{\mathrm{B^+}}$", zorder=2)
plt.plot(full_pH_range, ideal_c_H, c="tab:green",
label=r"ideal $c_{\mathrm{H^+}}$", zorder=0)
plt.plot(full_pH_range, ideal_c_M, c="tab:orange",
label=r"ideal $c_{\mathrm{M^+}}$", zorder=0)
plt.plot(full_pH_range, ideal_c_Aminus, c="tab:blue", ls=(0, (5, 5)),
label=r"ideal $c_{\mathrm{A^-}}$", zorder=1)
plt.yscale("log")
plt.ylim(1e-6,)
plt.xlabel('input pH', fontsize=16)
plt.ylabel(r'concentration $c$ $[\mathrm{mol/L}]$', fontsize=16)
plt.legend(fontsize=16)
plt.show()
ideal_c_X = np.clip(-(ideal_c_Aminus + ideal_c_OH - ideal_c_H), 0, np.inf)
ideal_ionic_strength = 0.5 * \
(ideal_c_X + ideal_c_M + ideal_c_H + ideal_c_OH + 2 * C_SALT_UNITLESS)
# in constant-pH simulation ideal_c_Aminus = ideal_c_Bplus
cpH_ionic_strength = 0.5 * (ideal_c_Aminus + 2 * C_SALT_UNITLESS)
cpH_ionic_strength_measured = 0.5 * (av_c_Bplus + 2 * C_SALT_UNITLESS)
cpH_error_ionic_strength_measured = 0.5 * err_c_Bplus
plt.figure(figsize=(10, 6), dpi=80)
plt.errorbar(pHs,
cpH_ionic_strength_measured,
cpH_error_ionic_strength_measured,
c="tab:blue",
linestyle='none', marker='o',
label=r"measured", zorder=3)
plt.plot(full_pH_range,
cpH_ionic_strength,
c="tab:blue",
ls=(0, (5, 5)),
label=r"constant-pH", zorder=2)
plt.plot(full_pH_range,
ideal_ionic_strength,
c="tab:orange",
linestyle='-',
label=r"ideal", zorder=1)
plt.yscale("log")
plt.xlabel('input pH', fontsize=16)
plt.ylabel(r'Ionic Strength [$\mathrm{mol/L}$]', fontsize=16)
plt.legend(fontsize=16)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Constant pH Method
Step2: The package pint is intended to make handling physical quantities with different units easy. You simply create an instance of <tt>pint.UnitRegistry</tt> and access its unit definitions and automatic conversions. For more information or a quick introduction please look at the pint-documentation or pint-tutorials.
Step3: The inputs that we need to define our system in the simulation include
Step4: Set concentrations and system size
Step5: Set reaction variables
Step6: Set non-bonded interaction flags
Step7: Set number of samples
Step8: Set particle types and charges
Step9: Initialize the ESPResSo system
Step10: Set up particles and bonded-interactions
Step11: Set up non-bonded-interactions
Step12: Set up reaction-ensemble method
Step13: Results
Step14: Here, we calculate the average number of particles of type $\mathrm{A^-}$ and estimate the error and auto-correlation time by the statistical analysis presented before.
Step15: The simulation results for the non-interacting case match very well with the analytical solution of Henderson-Hasselbalch equation. There are only minor deviations, and the estimated errors are small too. This situation will change when we introduce interactions.
Step16: To look in more detail at the statistical accuracy, it is useful to plot the deviations from the analytical result. This provides another way to check the consistency of error estimates. About 68% of the results should be within one error bar from the analytical result, whereas about 95% of the results should be within two times the error bar. Indeed, if you plot the deviations by running the script below, you should observe that most of the results are within one error bar from the analytical solution, a smaller fraction of the results is slightly further than one error bar, and one or two might be about two error bars apart. Again, this situation will change when we introduce interactions because the ionization of the interacting system should deviate from the Henderson-Hasselbalch equation.
Step17: The Neutralizing Ion $\mathrm{B^+}$
Step18: The plot shows that at intermediate pH the concentration of $\mathrm{B^+}$ ions is approximately equal to the concentration of $\mathrm{M^+}$ ions. Only at one specific $\mathrm{pH}$ the concentration of $\mathrm{B^+}$ ions is equal to the concentration of $\mathrm{H^+}$ ions. This is the pH one obtains when dissolving the weak acid $\mathrm{A}$ in pure water.
|
2,583
|
<ASSISTANT_TASK:>
Python Code:
responses = {}
responses
type(responses)
responses["hello"] = "world"
responses
responses["hola"] = "mundo"
responses
def greet(salutation):
    """Print *salutation* together with its stored response.

    Falls back to an apology when the greeting is not in the
    module-level ``responses`` dict.
    """
    if salutation in responses:
        print(salutation, responses[salutation])
    else:
        print("Sorry, don't know how to respond to", salutation)
# Two known greetings, then one the table does not know yet.
greet("hello")
greet("hola")
greet("你好")
# After adding the missing key, the same greeting now resolves.
responses["你好"] = "世界"
greet("你好")
# Count how often each base occurs in a short DNA string.
counts = {}
sequence = "acggtattcggt"
for base in sequence:
    if not base in counts:
        counts[base] = 1  # first time we see this base
    else:
        counts[base] += 1  # seen before: bump the tally
counts
# Same idea for overlapping k-mers (substrings of length k).
k = 3
kmercounts = {}
# There are len(sequence) - k + 1 overlapping windows of width k.
for i in range(len(sequence) - k + 1):
    kmer = sequence[i:i+k]
    if not kmer in kmercounts:
        kmercounts[kmer] = 1
    else:
        kmercounts[kmer] += 1
kmercounts
from IPython.core.display import SVG, display
display(SVG(filename='fig-graph.svg'))
# Adjacency-list representation: each node maps to a list of its neighbours.
graph = {}
graph
graph[1] = [2, 3, 4]
graph
graph[2] = [1, 3]
graph[3] = [1, 2]
graph[4] = [1]
graph
# Iterating a dict yields its keys, i.e. the graph's nodes.
for node in graph:
    print(node, graph[node])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A dictionary can store values for a key. In this example, we will store the value "world", at the key "hello".
Step2: One nice property of dicts is that they can store more key/value pairs.
Step3: What happens if you ask for an unknown key?
Step4: We can update the dict, and ask again
Step5: More tricks with dicts
Step6: Since keys can be any string, we can use a similar loop to count triplet (or codon) frequency
Step7: Using dicts to build graphs
Step8: Now we can loop over the nodes, and extract the edges.
|
2,584
|
<ASSISTANT_TASK:>
Python Code:
print("Most billionaires are from the following countries in descending order:")
df['countrycode'].value_counts().head(5)
us = 903 / 1000000000
ger = 160 / 1000000000
china = 153 / 1000000000
russia = 119 / 1000000000
japan = 96 / 1000000000
print("per billion for us is", us, "for germany is", ger, "for china is", china, "for russia is", russia, "for japan is", japan)
df['networthusbillion'].describe()
print("Average wealth of a billionaire is 3.531943")
male = df[df['gender'] == "male"]
male.head()
male['networthusbillion'].describe()
print("The average wealth of male billionaires is 3.516881")
female = df[df['gender'] == "female"]
female['networthusbillion'].describe()
print("The average wealth of female billionaires is 3.819277")
print("Most common source of wealth are:")
df['sourceofwealth'].value_counts().head()
print("Most common source of wealth for male billionaires are:")
male['sourceofwealth'].value_counts().head()
print("Most common source of wealth for female billionaires are:")
female['sourceofwealth'].value_counts().head()
bill = df.sort_values('networthusbillion', ascending=False).head(10)
df.sort_values('networthusbillion', ascending=False).head(10)
print("A precise list of billionaires, wealth and rank is given below:")
columns_want = bill[['name', 'rank', 'networthusbillion']]
columns_want
us_gdp = 7419
wealth_rich = 76
percent = round((wealth_rich * 100) / us_gdp)
print(percent, "% of the US GDP is their wealth")
print("the most common industries for billionaires to come from are:")
df['industry'].value_counts()
columns_we_want = df[['name', 'networthusbillion', 'industry']]
columns_we_want
print("the total amount of billionaire money from each industry are given below:")
columns_we_want.groupby('industry').describe()
#columnswant = df[['name', 'networthusbillion', 'selfmade']]
#columnswant
print("The number of selfmade billionaires are:")
df['selfmade'].value_counts()
columns_want = df[['name', 'age', 'selfmade']]
columns_want.head(10)
columns_want = df[['name', 'age', 'industry']]
columns_want.head(10)
columns_want.sort_values('age', ascending=False)
is_in_us = df[df['countrycode'] == "USA"]
is_in_us['networthusbillion'].describe()
print("The total wealth of billionaires in US is 903")
gdp_india = 2066.90
us_bill_wealth = 903
percent = round((us_bill_wealth * 100) / gdp_india)
print(percent, "% of the India GDP is the wealth of US billionaires")
df.sort_values('networthusbillion').head(10)
print("The poorest billionaire is")
df.sort_values('networthusbillion').head(1)
print("The ten youngest billionaires are: ")
df.sort_values('age').head(10)
print("The ten oldest billionaires are: ")
df.sort_values('age', ascending=False).head(10)
columns_want = df[['name', 'age', 'industry']]
columns_want.sort_values('age', ascending=False).head(10)
import matplotlib.pyplot as plt
%matplotlib inline
df.plot(kind='scatter', x='age', y='networthusbillion')
print("The most common relationships are:")
df['relationshiptocompany'].value_counts().head()
# Fixed grammar in the user-facing message ("is describes" -> "describes").
print("Relationship to a company describes the billionaire's relationship to the company primarily responsible for their wealth, such as founder, executive, relation, or shareholder")
# `columnswant` was only defined in a commented-out cell earlier, so the bare
# reference below raised a NameError at runtime; define it before showing it.
columnswant = df[['name', 'networthusbillion', 'selfmade']]
columnswant
sort_df = df.sort_values('networthusbillion')
sort_df.plot(kind='line', x='rank', y='networthusbillion')
df.plot(kind='bar', x='name', y='networthusbillion')
df.plot(kind='scatter', x='age', y='networthusbillion')
df['networthusbillion'].head(10).plot(kind='bar', x='name', y='networthusbillion')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. What's the average wealth of a billionaire? Male? Female?
Step2: 3. Most common source of wealth? Male vs. female?
Step3: 4. List top ten billionaires
Step4: 5.Given the richest person in a country, what % of the GDP is their wealth?
Step5: 6.What are the most common industries for billionaires to come from? What's the total amount of billionaire money from each industry?
Step6: 7.How many self made billionaires vs. others?
Step7: 8.How old are billionaires? How old are billionaires self made vs. non self made? or different industries?
Step8: 9.Add up the wealth of all of the billionaires in a given country (or a few countries) and then compare it to the GDP of the country, so like pit the US vs India
Step9: Compare the total wealth of billionaires in US to the GDP of the country, so like pit the US vs India¶
Step10: 10. List top 10 poorest billionaires. Who is the poorest billionare ?
Step11: 11. List ten youngest billionaires, list ten oldest billionaires, and plot and age distribution graph
Step12: 11b. Plot an age distribution graph
Step13: 12. What is relationship to company? And what are the most common relationships?
Step14: 13.Maybe just made a graph about how wealthy they are in general?
Step15: 14.Maybe plot their net worth vs age (scatterplot)
Step16: 15.Make a bar graph of the top 10 or 20 richest
|
2,585
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib
import matplotlib.pyplot as pyplot
import numpy
import os
import time
# tensorflow
import tensorflow as tf
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
# python3-6 NCS. This provideds the OTF and the simulated images.
import pyNCS
import pyNCS.denoisetools as ncs
# python3 and C NCS.
import pyCNCS.ncs_c as ncsC
# Generate the same random noise each time.
numpy.random.seed(1)
py_ncs_path = os.path.dirname(os.path.abspath(pyNCS.__file__))
print(py_ncs_path)
# create normalized ideal image
fpath1 = os.path.join(py_ncs_path, "../randwlcposition.mat")
imgsz = 128
zoom = 8
Pixelsize = 0.1
NA = 1.4
Lambda = 0.7
t = time.time()
res = ncs.genidealimage(imgsz,Pixelsize,zoom,NA,Lambda,fpath1)
elapsed = time.time()-t
print('Elapsed time for generating ideal image:', elapsed)
imso = res[0]
pyplot.imshow(imso,cmap="gray")
# select variance map from calibrated map data
fpath = os.path.join(py_ncs_path, "../gaincalibration_561_gain.mat")
noisemap = ncs.gennoisemap(imgsz,fpath)
varsub = noisemap[0]*10 # increase the readout noise by 10 to demonstrate the effect of NCS algorithm
gainsub = noisemap[1]
# generate simulated data
I = 100
bg = 10
offset = 100
N = 1
dataimg = ncs.gendatastack(imso,varsub,gainsub,I,bg,offset,N)
imsd = dataimg[1]
print(imsd.shape)
alpha = 0.1
# Get the OTF mask that NCSDemo_simulation.py used.
rcfilter = ncs.genfilter(128,Pixelsize,NA,Lambda,'OTFweighted',1,0.7)
print(rcfilter.shape)
pyplot.imshow(rcfilter, cmap = "gray")
pyplot.show()
# Calculate gamma and run Python/C NCS.
gamma = varsub/(gainsub*gainsub)
# This takes ~100ms on my laptop.
out_c = ncsC.pyReduceNoise(imsd[0], gamma, rcfilter, alpha)
f,(ax1,ax2) = pyplot.subplots(1,2,sharey=False,figsize = (8,8))
ax1.imshow(imsd[0],aspect='equal',cmap="gray")
ax2.imshow(out_c,aspect ='equal',cmap="gray")
pyplot.show()
py_otf_mask = numpy.fft.fftshift(rcfilter.astype(numpy.float32))
FITMIN = tf.constant(1.0e-6)
tf_alpha = tf.constant(numpy.float32(alpha))
tf_data = tf.Variable(imsd[0].astype(numpy.float32), shape = (128, 128), trainable=False)
tf_gamma = tf.constant(gamma.astype(numpy.float32))
tf_rc = tf.constant(py_otf_mask*py_otf_mask/(128.0*128.0))
tf_u = tf.Variable(imsd[0].astype(numpy.float32), shape = (128, 128), trainable=True)
# Tensorflow cost function.
@tf.function
def cost():
    """NCS objective: data-fidelity term plus OTF-weighted noise penalty.

    Reads the module-level tensors tf_data (measured image), tf_gamma
    (camera noise parameter), tf_u (the image estimate being optimized),
    tf_rc (scaled OTF mask), tf_alpha (penalty weight) and FITMIN
    (positivity clamp).  Returns a scalar tf.Tensor to minimize.
    """
    ## LL
    # Likelihood-style term: sum(u - (data + gamma) * log(u + gamma)),
    # with the log argument clamped at FITMIN to stay positive.
    t1 = tf.math.add(tf_data, tf_gamma)
    t2 = tf.math.add(tf_u, tf_gamma)
    t2 = tf.math.maximum(t2, FITMIN)
    t2 = tf.math.log(t2)
    t2 = tf.math.multiply(t1, t2)
    t2 = tf.math.subtract(tf_u, t2)
    c1 = tf.math.reduce_sum(t2)
    ## NC
    # Noise-correction term: |FFT(u)|^2 weighted by the OTF mask tf_rc,
    # summed and scaled by alpha.
    t1 = tf.dtypes.complex(tf_u, tf.zeros_like(tf_u))
    t2 = tf.signal.fft2d(t1)
    t2 = tf.math.multiply(t2, tf.math.conj(t2))
    t2 = tf.math.abs(t2)
    t2 = tf.math.multiply(t2, tf_rc)
    c2 = tf.math.reduce_sum(t2)
    c2 = tf.math.multiply(tf_alpha, c2)
    return tf.math.add(c1, c2)
# Gradient Descent Optimizer.
#
# This takes ~700ms on my laptop, so about 7x slower.
tf_data.assign(numpy.copy(imsd[0]))
tf_u.assign(tf_data.numpy())
for i in range(100):
if((i%10)==0):
print(cost().numpy())
opt = gradient_descent.GradientDescentOptimizer(2.0).minimize(cost)
out_tf = tf_u.numpy()
f,(ax1,ax2) = pyplot.subplots(1,2,sharey=False,figsize = (8,4))
ax1.imshow(out_c,aspect='equal',cmap="gray")
ax2.imshow(out_tf,aspect ='equal',cmap="gray")
pyplot.show()
print("Maximum pixel difference is {0:.3f}e-".format(numpy.max(numpy.abs(out_c - out_tf))))
# AdamOptimizer.
#
# This takes ~1.5ms on my laptop, so about 15x slower.
tf_data.assign(numpy.copy(imsd[0]))
tf_u.assign(tf_data.numpy())
for i in range(100):
if((i%10)==0):
print(cost().numpy())
opt = adam.AdamOptimizer(0.8).minimize(cost)
out_tf_2 = tf_u.numpy()
f,(ax1,ax2) = pyplot.subplots(1,2,sharey=False,figsize = (8,4))
ax1.imshow(out_c,aspect='equal',cmap="gray")
ax2.imshow(out_tf_2,aspect ='equal',cmap="gray")
pyplot.show()
print("Maximum pixel difference is {0:.3f}e-".format(numpy.max(numpy.abs(out_c - out_tf_2))))
# Adagrad.
#
# This takes ~950ms on my laptop, so about 9.5x slower.
tf_data.assign(numpy.copy(imsd[0]))
tf_u.assign(tf_data.numpy())
for i in range(100):
if((i%10)==0):
print(cost().numpy())
opt = adagrad.AdagradOptimizer(0.8).minimize(cost)
out_tf_3 = tf_u.numpy()
f,(ax1,ax2) = pyplot.subplots(1,2,sharey=False,figsize = (8,4))
ax1.imshow(out_c,aspect='equal',cmap="gray")
ax2.imshow(out_tf_3,aspect ='equal',cmap="gray")
pyplot.show()
print("Maximum pixel difference is {0:.3f}e-".format(numpy.max(numpy.abs(out_c - out_tf_3))))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: pyNCS analysis
Step2: pyCNCS analysis
Step3: Compare results to reference implementation.
Step4: Tensorflow
|
2,586
|
<ASSISTANT_TASK:>
Python Code:
mu = [2, 3]
cov = [[2, -1],[2, 4]]
rv = sp.stats.multivariate_normal(mu, cov)
xx = np.linspace(-1, 5, 150)
yy = np.linspace(0, 6, 120)
XX, YY = np.meshgrid(xx, yy)
ZZ = rv.pdf(np.dstack([XX, YY]))
plt.contour(XX, YY, ZZ)
plt.xlabel("x")
plt.ylabel("y")
plt.title("Joint Probability Density")
plt.axis("equal")
plt.show()
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
ax.contour(XX, YY, ZZ, levels=np.linspace(0, 0.1, 20))
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_title("Joint Probability Density")
plt.show()
pmf = np.array([[0, 0, 0, 0, 1, 1],
[0, 0, 1, 2, 1, 0],
[0, 1, 3, 3, 1, 0],
[0, 1, 2, 1, 0, 0],
[1, 1, 0, 0, 0, 0]])
pmf = pmf / pmf.sum()
pmf
sns.heatmap(pmf)
plt.xlabel("x")
plt.ylabel("y")
plt.title("Joint Probability Mass Function")
plt.show()
pmf
pmf_marginal_x = pmf.sum(axis=0)
pmf_marginal_x
pmf_marginal_y = pmf.sum(axis=1)
pmf_marginal_y[:, np.newaxis]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.contour(XX, YY, 0.4*ZZ, levels=np.linspace(0, 0.04, 30), alpha=0.3)
ax.plot(yy, ZZ.mean(axis=1), zdir='x', lw=3)
ax.plot(xx, ZZ.mean(axis=0), zdir='y', lw=3)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_title("Marginal Probability Density")
ax.view_init(55, -40)
plt.show()
fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(8, 12), subplot_kw={'projection': '3d'})
ax1.plot_wireframe(XX, YY, ZZ, rstride=30, cstride=0, lw=3)
ax1.set_xlabel("x")
ax1.set_ylabel("y")
ax1.set_title("Conditional Probability Density $f(x \mid y)$")
ax2.plot_wireframe(XX, YY, ZZ, rstride=0, cstride=30, lw=3)
ax2.set_xlabel("x")
ax2.set_ylabel("y")
ax2.set_title("Conditional Probability Density $f(y \mid x)$")
plt.tight_layout()
plt.show()
pmf
# Conditional pmf of x given y = 0:  p(x | y=0) = p(x, y=0) / p(y=0).
# Renamed from `conf_y0` (typo) to `cond_y0` for consistency with `cond_y1`.
cond_y0 = pmf[0, :] / pmf_marginal_y[0]
cond_y0
# Conditional pmf of x given y = 1.
cond_y1 = pmf[1, :] / pmf_marginal_y[1]
cond_y1
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 동일한 결합 확률 밀도 함수를 3차원으로 그리면 아래와 같다.
Step2: 이산 확률 변수의 결합 확률 질량 함수
Step3: 주변 확률 밀도 함수
Step4: 위에서 예로 든 연속 확률 변수의 경우에 주변 확률 밀도 함수를 계산하면 다음과 같다.
Step5: 다변수 가우시안 정규분포의 첫 번째 꼭지점이 높은 이유는? 맨처음 올라가는 것의 미분 기울기가 급격하게 올라가기 때문
Step6: 위에서 예로 든 이산 확률 변수의 경우에 조건부 확률 질량 함수를 계산하면 다음과 같다.
|
2,587
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import xarray as xr
from netCDF4 import num2date
import matplotlib.pyplot as plt
print("numpy version : ", np.__version__)
print("pandas version : ", pd.__version__)
print("xarray version : ", xr.__version__)
dpm = {'noleap': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
'365_day': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
'standard': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
'gregorian': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
'proleptic_gregorian': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
'all_leap': [0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
'366_day': [0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
'360_day': [0, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30]}
def leap_year(year, calendar='standard'):
    """Determine whether *year* is a leap year under the given CF calendar.

    'standard'/'gregorian' follow Julian rules up to 1582 and Gregorian
    rules afterwards; 'proleptic_gregorian' applies Gregorian rules to all
    years; 'julian' makes every fourth year leap.  Fixed-length calendars
    ('noleap', '360_day', ...) always return False.
    """
    # NOTE: the original line below was a docstring whose quotes were lost,
    # which made the function a syntax error; restored as a real docstring.
    leap = False
    if ((calendar in ['standard', 'gregorian',
                      'proleptic_gregorian', 'julian']) and
            (year % 4 == 0)):
        leap = True
    # Gregorian century rule: years divisible by 100 but not 400 are common.
    if ((calendar == 'proleptic_gregorian') and
            (year % 100 == 0) and
            (year % 400 != 0)):
        leap = False
    elif ((calendar in ['standard', 'gregorian']) and
            (year % 100 == 0) and (year % 400 != 0) and
            (year > 1582)):
        # The mixed 'standard' calendar only applies the Gregorian century
        # rule after the 1582 reform.  The original condition `year < 1583`
        # was inverted: it marked 1900 as leap and 1500 as non-leap.
        leap = False
    return leap
def get_dpm(time, calendar='standard'):
    """Return an array with the number of days in each month of *time*.

    time : index with per-element ``.month`` and ``.year`` attributes
        (e.g. a pandas DatetimeIndex).
    calendar : key into the module-level ``dpm`` month-length table.
    """
    # `np.int` was a deprecated alias removed in NumPy 1.24; use builtin int.
    month_length = np.zeros(len(time), dtype=int)
    cal_days = dpm[calendar]
    for i, (month, year) in enumerate(zip(time.month, time.year)):
        month_length[i] = cal_days[month]
        # February gains a day in leap years of this calendar.
        if leap_year(year, calendar=calendar):
            month_length[i] += 1
    return month_length
monthly_mean_file = 'RASM_example_data.nc'
ds = xr.open_dataset(monthly_mean_file, decode_coords=False)
print(ds)
# Make a DataArray with the number of days in each month, size = len(time)
month_length = xr.DataArray(get_dpm(ds.time.to_index(),
calendar='noleap'),
coords=[ds.time], name='month_length')
# Calculate the weights by grouping by 'time.season'.
seasons = month_length.groupby('time.season')
weights = seasons / seasons.sum()
# Test that the sum of the weights for each season is 1.0
np.testing.assert_allclose(weights.groupby('time.season').sum().values, np.ones(4))
# Calculate the weighted average
ds_weighted = (ds * weights).groupby('time.season').sum(dim='time')
ds_weighted
# only used for comparisons
ds_unweighted = ds.groupby('time.season').mean('time')
ds_diff = ds_weighted - ds_unweighted
# Quick plot to show the results
is_null = np.isnan(ds_unweighted['Tair'][0].values)
fig, axes = plt.subplots(nrows=4, ncols=3, figsize=(14,12))
for i, season in enumerate(('DJF', 'MAM', 'JJA', 'SON')):
plt.sca(axes[i, 0])
plt.pcolormesh(np.ma.masked_where(is_null, ds_weighted['Tair'].sel(season=season).values),
vmin=-30, vmax=30, cmap='Spectral_r')
plt.colorbar(extend='both')
plt.sca(axes[i, 1])
plt.pcolormesh(np.ma.masked_where(is_null, ds_unweighted['Tair'].sel(season=season).values),
vmin=-30, vmax=30, cmap='Spectral_r')
plt.colorbar(extend='both')
plt.sca(axes[i, 2])
plt.pcolormesh(np.ma.masked_where(is_null, ds_diff['Tair'].sel(season=season).values),
vmin=-0.1, vmax=.1, cmap='RdBu_r')
plt.colorbar(extend='both')
for j in range(3):
axes[i, j].axes.get_xaxis().set_ticklabels([])
axes[i, j].axes.get_yaxis().set_ticklabels([])
axes[i, j].axes.axis('tight')
axes[i, 0].set_ylabel(season)
axes[0, 0].set_title('Weighted by DPM')
axes[0, 1].set_title('Equal Weighting')
axes[0, 2].set_title('Difference')
plt.tight_layout()
fig.suptitle('Seasonal Surface Air Temperature', fontsize=16, y=1.02)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some calendar information so we can support any netCDF calendar.
Step4: A few calendar functions to determine the number of days in each month
Step5: Open the Dataset
Step6: Now for the heavy lifting
|
2,588
|
<ASSISTANT_TASK:>
Python Code:
import rebound
import reboundx
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(m=1.e-3, a=1., e=0.2)
ps = sim.particles
rebx = reboundx.Extras(sim)
cf = rebx.load_force("central_force")
rebx.add_force(cf)
ps[0].params["gammacentral"] = -1. # period needed after integer power
ps[0].params["Acentral"] = 1.e-4
pomegadot = 1.e-3 # has dimensions of inverse time, in whatever units the simulation uses.
ps[0].params["Acentral"] = rebx.central_force_Acentral(ps[1], ps[0], pomegadot, ps[0].params["gammacentral"])
E0 = sim.calculate_energy() + rebx.central_force_potential()
import numpy as np
Nout=1000
pomega, Eerr = np.zeros(Nout), np.zeros(Nout)
times = np.linspace(0,3.e4,Nout)
for i, time in enumerate(times):
sim.integrate(time)
pomega[i] = ps[1].pomega
E = sim.calculate_energy() + rebx.central_force_potential()
Eerr[i] = abs((E-E0)/E0)
%matplotlib inline
import matplotlib.pyplot as plt
fig, axarr = plt.subplots(nrows=2, figsize=(12,8))
axarr[0].plot(times, pomega, '.')
axarr[0].set_ylabel("Pericenter", fontsize=24)
axarr[1].plot(times, Eerr, '.')
axarr[1].set_xscale('log')
axarr[1].set_yscale('log')
axarr[1].set_xlabel('Time', fontsize=24)
axarr[1].set_ylabel('Energy Error', fontsize=24)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we add REBOUNDx and our effect as usual
Step2: We need to choose a normalization Acentral and power gammacentral for our force law (see above), which we assign to the central particle itself. gammacentral must be a float (add a dot after an integer power).
Step3: We can also (instead) use the function rebx.central_force_Acentral to calculate the Acentral required for a particular particle (here ps[1]) around a primary (ps[0]) to have a pericenter precession rate of pomegadot, given a power gammacentral
Step4: We can include the force's contribution to the total energy
Step5: Now we integrate, keep track of the pericenter and the relative energy error, then plot the results
|
2,589
|
<ASSISTANT_TASK:>
Python Code:
# import libraries
# linear algebra
import numpy as np
# data processing
import pandas as pd
# library of math
import math
# data visualization
from matplotlib import pyplot as plt
# datasets
from sklearn import datasets
# Scikit Learning hierarchical clustering
from sklearn.cluster import AgglomerativeClustering
# SciPy hierarchical clustering
from scipy.cluster import hierarchy
# Dataset
tmp_data = np.array([[1,1],[2,2],[2,3],[4,4],
[4,2],[6,6],[10,9],[7,7],
[8,8],[7,9],[10,10],[14,2]])
plt.scatter(tmp_data[:,0], tmp_data[:,1], s=150)
plt.show()
# Teste diferentes funções de ligações
# {“ward”, “complete”, “average”}
linkage = 'ward'
# Clusterização hierárquica
ag_model = AgglomerativeClustering(linkage=linkage)
# Número de colunas do plot
plot_col = 3
n_rows, n_col = tmp_data.shape
fig, ax = plt.subplots(nrows=math.ceil(len(tmp_data)/plot_col),
ncols=plot_col,figsize=(20,20))
count = 0
for n_clusters in range(len(tmp_data),0,-1):
index = abs(n_clusters - len(tmp_data))
# Atribui o número de clusters
ag_model.n_clusters = n_clusters
# Ajusta o modelo
ag_model = ag_model.fit(tmp_data)
if plot_col == 1:
ax[count].scatter(tmp_data[:,0], tmp_data[:,1], c=ag_model.labels_, s=150)
ax[count].set_title("Qt. Clusters: " + str(n_clusters))
else:
ax[count, (index)%plot_col].scatter(tmp_data[:,0], tmp_data[:,1], c=ag_model.labels_, s=150)
ax[count, (index)%plot_col].set_title("Qt. Clusters: " + str(n_clusters))
if (index+1) % plot_col == 0:
count += 1
plt.show()
# Usa o método de linkage especificado
# para construir o dendrograma
if (linkage == 'average'):
Z = hierarchy.average(tmp_data)
elif (linkage == 'complete'):
Z = hierarchy.complete(tmp_data)
elif (linkage == 'ward'):
Z = hierarchy.ward(tmp_data)
plt.figure()
plt.title("Dendrograma - linkage: " + str(linkage))
dn = hierarchy.dendrogram(Z)
# blobs with varied variances
n_samples = 1500
random_state = 170
varied = datasets.make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
dataset_varied = varied[0]
plt.scatter(dataset_varied[:,0], dataset_varied[:,1])
plt.show()
import nbimporter
from Notebook_KMeans import KMeans
### CODE HERE ###
### CODE HERE ###
### CODE HERE ###
# Gerar o conjunto de dados
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
dataset_circles = noisy_circles[0]
# Mostrar os dados
plt.scatter(dataset_circles[:,0], dataset_circles[:,1])
plt.show()
### CODE HERE ###
### CODE HERE ###
### CODE HERE ###
# Anisotropicly distributed data
random_state = 170
X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
dataset = np.dot(X, transformation)
# Mostrar os dados
plt.scatter(dataset[:,0], dataset[:,1])
plt.show()
### CODE HERE ###
### CODE HERE ###
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1.1 Clusterização Hierárquica
Step2: 1.2 Dendrograma
Step3: É possível fazer um teste de permutação para validar o número de clusters escolhidos, ou seja, verificar se realmente existe uma tendência não aleatória para os objetos se agruparem.
Step4: Utilize o algoritmo do K-means para separa os conjuntos.
Step5: Os dados parecem estar agrupados corretamente? Tente fazer o mesmo procedimento com algoritmo de clusterização hieráquica
Step6: Mostre e análise o dendrograma do exemplo acima.
Step7: Qual a sua conclusão?
Step8: Utilize o algoritmo do K-means para separa os conjuntos.
Step9: Os dados parecem estar agrupados corretamente? Tente fazer o mesmo procedimento com algoritmo de clusterização hieráquica, altera os parâmetros se for necessário.
Step10: Mostre e análise o dendrograma do exemplo acima.
Step11: Qual a sua conclusão?
Step12: Clusterize os dados usando clusterização Hierárquica
Step13: Clusterize os dados usando o algoritmo DBSCAN.
|
2,590
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_files
from keras.utils import np_utils
import numpy as np
from glob import glob
# define function to load train, test, and validation datasets
def load_dataset(path):
    """Load image file paths and one-hot (133-class) targets from *path*."""
    data = load_files(path)
    files = np.array(data['filenames'])
    # One-hot encode the integer class labels over the 133 dog breeds.
    targets = np_utils.to_categorical(np.array(data['target']), 133)
    return files, targets
# load train, test, and validation datasets
train_files, train_targets = load_dataset('/data/dog_images/train')
valid_files, valid_targets = load_dataset('/data/dog_images/valid')
test_files, test_targets = load_dataset('/data/dog_images/test')
# load list of dog names
dog_names = [item[20:-1] for item in sorted(glob("/data/dog_images/train/*/"))]
# print statistics about the dataset
print('There are %d total dog categories.' % len(dog_names))
print('There are %s total dog images.\n' % len(np.hstack([train_files, valid_files, test_files])))
print('There are %d training dog images.' % len(train_files))
print('There are %d validation dog images.' % len(valid_files))
print('There are %d test dog images.'% len(test_files))
import random
random.seed(8675309)
# load filenames in shuffled human dataset
human_files = np.array(glob("/data/lfw/*/*"))
random.shuffle(human_files)
# print statistics about the dataset
print('There are %d total human images.' % len(human_files))
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')
# load color (BGR) image
img = cv2.imread(human_files[3])
# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# find faces in image
faces = face_cascade.detectMultiScale(gray)
# print number of faces detected in the image
print('Number of faces detected:', len(faces))
# get bounding box for each detected face
for (x,y,w,h) in faces:
# add bounding box to color image
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    """Return True when the Haar-cascade detector finds at least one face."""
    grayscale = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2GRAY)
    detections = face_cascade.detectMultiScale(grayscale)
    return len(detections) > 0
human_files_short = human_files[:100]
dog_files_short = train_files[:100]
# Do NOT modify the code above this line.
## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
humans = 0
dogs = 0
for img in human_files_short:
if face_detector(img) :
humans = humans + 1
for img in dog_files_short:
if face_detector(img) :
dogs = dogs + 1
accuracy_on_humans = humans / 100.0
accuracy_on_dogs = dogs / 100.0
print ("Accuracy on humans is " + str(accuracy_on_humans))
print ("Accuracy on dogs is " + str(accuracy_on_dogs))
## (Optional) TODO: Report the performance of another
## face detection algorithm on the LFW dataset
### Feel free to use as many code cells as needed.
from keras.applications.resnet50 import ResNet50
# define ResNet50 model
ResNet50_model = ResNet50(weights='imagenet')
from keras.preprocessing import image
from tqdm import tqdm
def path_to_tensor(img_path):
    """Load the image at *img_path* as a (1, 224, 224, 3) tensor."""
    # PIL image resized to the 224x224 input expected by ImageNet models.
    pil_img = image.load_img(img_path, target_size=(224, 224))
    # (224, 224, 3) array, then prepend a singleton batch axis.
    arr = image.img_to_array(pil_img)
    return np.expand_dims(arr, axis=0)
def paths_to_tensor(img_paths):
    """Convert a sequence of image paths into one (N, 224, 224, 3) batch."""
    batch = []
    for p in tqdm(img_paths):
        batch.append(path_to_tensor(p))
    # Stacking (1, 224, 224, 3) tensors along axis 0 == np.vstack here.
    return np.concatenate(batch, axis=0)
from keras.applications.resnet50 import preprocess_input, decode_predictions
def ResNet50_predict_labels(img_path):
    """Return the ImageNet class index ResNet50 predicts for the image."""
    tensor = preprocess_input(path_to_tensor(img_path))
    predictions = ResNet50_model.predict(tensor)
    return np.argmax(predictions)
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    """True when ResNet50 assigns the image an ImageNet dog class (151-268)."""
    label = ResNet50_predict_labels(img_path)
    return 151 <= label <= 268
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
humans = 0
dogs = 0
for img in human_files_short:
if dog_detector(img) :
humans = humans + 1
for img in dog_files_short:
if dog_detector(img) :
dogs = dogs + 1
accuracy_on_humans = humans / 100.0
accuracy_on_dogs = dogs / 100.0
print ("Accuracy on humans is " + str(accuracy_on_humans))
print ("Accuracy on dogs is " + str(accuracy_on_dogs))
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# pre-process the data for Keras
train_tensors = paths_to_tensor(train_files).astype('float32')/255
valid_tensors = paths_to_tensor(valid_files).astype('float32')/255
test_tensors = paths_to_tensor(test_files).astype('float32')/255
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense, Activation
from keras.models import Sequential
model = Sequential()
### TODO: Define your architecture.
# 1st Layer - Add an input layer of 32 nodes with the same input shape as
# the training samples in X
model.add(Conv2D(filters=16, kernel_size=2, padding='same', activation='relu',
input_shape=(224, 224, 3)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu',
input_shape=(110, 110, 3)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=64, kernel_size=2, padding='same', activation='relu',
input_shape=(54, 54, 3)))
model.add(MaxPooling2D(pool_size=2))
model.add(GlobalAveragePooling2D())
model.add(Dense(133, activation='softmax'))
model.summary()
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
from keras.callbacks import ModelCheckpoint
### TODO: specify the number of epochs that you would like to use to train the model.
epochs = 20
### Do NOT modify the code below this line.
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch.hdf5',
verbose=1, save_best_only=True)
model.fit(train_tensors, train_targets,
validation_data=(valid_tensors, valid_targets),
epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)
model.load_weights('saved_models/weights.best.from_scratch.hdf5')
# get index of predicted dog breed for each image in test set
dog_breed_predictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]
# report test accuracy
test_accuracy = 100*np.sum(np.array(dog_breed_predictions)==np.argmax(test_targets, axis=1))/len(dog_breed_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
bottleneck_features = np.load('/data/bottleneck_features/DogVGG16Data.npz')
train_VGG16 = bottleneck_features['train']
valid_VGG16 = bottleneck_features['valid']
test_VGG16 = bottleneck_features['test']
VGG16_model = Sequential()
VGG16_model.add(GlobalAveragePooling2D(input_shape=train_VGG16.shape[1:]))
VGG16_model.add(Dense(133, activation='softmax'))
VGG16_model.summary()
VGG16_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.VGG16.hdf5',
verbose=1, save_best_only=True)
VGG16_model.fit(train_VGG16, train_targets,
validation_data=(valid_VGG16, valid_targets),
epochs=20, batch_size=20, callbacks=[checkpointer], verbose=1)
VGG16_model.load_weights('saved_models/weights.best.VGG16.hdf5')
# get index of predicted dog breed for each image in test set
VGG16_predictions = [np.argmax(VGG16_model.predict(np.expand_dims(feature, axis=0))) for feature in test_VGG16]
# report test accuracy
test_accuracy = 100*np.sum(np.array(VGG16_predictions)==np.argmax(test_targets, axis=1))/len(VGG16_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
from extract_bottleneck_features import *
def VGG16_predict_breed(img_path):
    """Return the dog-breed name the VGG16 transfer model predicts for img_path."""
    # Bottleneck features for this one image, then the softmax vector over breeds.
    features = extract_VGG16(path_to_tensor(img_path))
    probabilities = VGG16_model.predict(features)
    best = np.argmax(probabilities)
    return dog_names[best]
### TODO: Obtain bottleneck features from another pre-trained CNN.
# Precomputed Xception bottleneck features for the train/valid/test splits.
bottleneck_features = np.load('/data/bottleneck_features/DogXceptionData.npz')
train_Xception = bottleneck_features['train']
valid_Xception = bottleneck_features['valid']
test_Xception = bottleneck_features['test']
### TODO: Define your architecture.
# Classifier head: global average pooling over the Xception feature maps,
# then a 133-way softmax (one output per dog breed).
Xception_model = Sequential()
# BUGFIX: the input shape must come from the Xception features; the previous
# code referenced train_VGG19, an undefined name (NameError at runtime).
Xception_model.add(GlobalAveragePooling2D(input_shape=train_Xception.shape[1:]))
Xception_model.add(Dense(133, activation='softmax'))
Xception_model.summary()
### TODO: Compile the model.
# (The original compiled the model twice with identical arguments; once is enough.)
Xception_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
from keras.callbacks import ModelCheckpoint
### TODO: Train the model.
# Keep only the weights with the best validation loss.
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.Xception.hdf5',
                               verbose=1, save_best_only=True)
history = Xception_model.fit(train_Xception, train_targets,
                             validation_data=(valid_Xception, valid_targets),
                             epochs=50, batch_size=100, callbacks=[checkpointer], verbose=1)
### TODO: Load the model weights with the best validation loss.
Xception_model.load_weights('saved_models/weights.best.Xception.hdf5')
### TODO: Calculate classification accuracy on the test dataset.
# get index of predicted dog breed for each image in test set
Xception_predictions = [np.argmax(Xception_model.predict(np.expand_dims(feature, axis=0))) for feature in test_Xception]
# report test accuracy
test_accuracy = 100*np.sum(np.array(Xception_predictions)==np.argmax(test_targets, axis=1))/len(Xception_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.
def Xception_predict_breed(img_path):
    """Predict the dog breed for the image at img_path.

    Returns a (breed_name, confidence) tuple, where confidence is the
    softmax probability assigned to the winning class.
    """
    features = extract_Xception(path_to_tensor(img_path))
    probabilities = Xception_model.predict(features)
    best = np.argmax(probabilities)
    return (dog_names[best], probabilities[0][best])
import glob
from PIL import Image
from io import BytesIO
from IPython.display import HTML
def get_thumbnail(path):
    """Load the image at `path` as RGB and shrink it in place to fit a 150x150 box."""
    img = Image.open(path).convert('RGB')
    img.thumbnail((150, 150), Image.LANCZOS)
    return img
def image_base64(im):
    """Encode a PIL image (or an image file path) as a base64 JPEG string."""
    img = get_thumbnail(im) if isinstance(im, str) else im
    with BytesIO() as buf:
        img.save(buf, 'jpeg')
        raw = buf.getvalue()
    return base64.b64encode(raw).decode()
def image_formatter(im):
    """Render a PIL image as an inline-HTML <img> tag with base64 JPEG data."""
    encoded = image_base64(im)
    return '<img src="data:image/jpeg;base64,{0}">'.format(encoded)
def file_formatter(file_name):
    """Render a file path as an HTML <img> tag sized 150x150."""
    return '<img src="' + file_name + '" width="150" height="150">'
# Run the Xception breed predictor over every image in dogs/ and remember the
# predicted label for each file.
predictions_and_images = dict()
for filename in glob.iglob('dogs/*'):
    label, score = Xception_predict_breed(filename)
    print("Predicting that image in {0}, depicts a {1}, with a score {2}".format(filename, label, score))
    predictions_and_images[filename] = label
import pandas as pd
import base64
from IPython.display import HTML
predictions_and_images
# Tabulate the predictions and attach a thumbnail per row.
predictions_df = pd.DataFrame.from_dict(predictions_and_images, orient='index')
predictions_df.reset_index(inplace=True)
predictions_df.columns = ['file', 'label']
predictions_df['image'] = predictions_df.file.map(lambda f: get_thumbnail(f))
predictions_df
# escape=False so the <img> markup produced by file_formatter is rendered.
HTML(predictions_df[['label', 'file']].to_html(formatters={'file': file_formatter}, escape=False))
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def predict_for_image(filename):
    """Classify `filename` as depicting a dog, a human, or neither.

    A breed confidence above 0.95 means "dog"; otherwise a detected face
    means "human".  Stores a (kind, closest_breed, score) tuple in the
    module-level predictions_and_images dict and returns that dict.
    """
    label, score = Xception_predict_breed(filename)
    if score > 0.95:
        kind = 'dog'
    elif face_detector(filename):
        kind = 'human'
    else:
        kind = 'neither'
    predictions_and_images[filename] = (kind, label, score)
    return predictions_and_images
## Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.
# Run the dog/human/neither classifier over every file in random_images/.
for filename in glob.iglob('random_images/*'):
    type_of_animal, label, score = predict_for_image(filename)[filename]
    print("Predicting that image in {0}, depicts a {1}, closest dog breed {2}, score = {3}".format(filename, type_of_animal, label, score))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import Human Dataset
Step2: <a id='step1'></a>
Step 1
Step3: Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The detectMultiScale function executes the classifier stored in face_cascade and takes the grayscale image as a parameter.
Step4: (IMPLEMENTATION) Assess the Human Face Detector
Step5: Question 2
Step6: <a id='step2'></a>
Step 2
Step7: Pre-process the Data
Step8: Making Predictions with ResNet-50
Step9: Write a Dog Detector
Step10: (IMPLEMENTATION) Assess the Dog Detector
Step11: <a id='step3'></a>
Step 3
Step12: (IMPLEMENTATION) Model Architecture
Step13: Compile the Model
Step14: (IMPLEMENTATION) Train the Model
Step15: Load the Model with the Best Validation Loss
Step16: Test the Model
Step17: <a id='step4'></a>
Step 4
Step18: Model Architecture
Step19: Compile the Model
Step20: Train the Model
Step21: Load the Model with the Best Validation Loss
Step22: Test the Model
Step23: Predict Dog Breed with the Model
Step24: <a id='step5'></a>
Step 5
Step25: (IMPLEMENTATION) Model Architecture
Step26: (IMPLEMENTATION) Compile the Model
Step27: (IMPLEMENTATION) Train the Model
Step28: (IMPLEMENTATION) Load the Model with the Best Validation Loss
Step29: (IMPLEMENTATION) Test the Model
Step30: (IMPLEMENTATION) Predict Dog Breed with the Model
Step31: <a id='step6'></a>
Step 6
Step32: <a id='step7'></a>
Step 7
|
2,591
|
<ASSISTANT_TASK:>
Python Code:
import pyspark.sql.functions as sql
import pyspark.sql.types as types
# Load a pinned version of the iDigBio occurrence parquet.
idb_df_version = "20170130"
idb_df = sqlContext.read.parquet("/guoda/data/idigbio-{0}.parquet".format(idb_df_version))
idb_df.count()
# Keep only the catalog numbers from one recordset (Museum of Comparative
# Zoology, Harvard -- see the plot title below) and cache it, since it is
# queried repeatedly.
subset = (idb_df
          .select(idb_df.catalognumber)
          .where(idb_df.recordset == "271a9ce9-c6d3-4b63-a722-cb0adc48863f")
         )
subset.cache()
subset.count()
# How many records have the literal catalog number "0"?
print(subset.where(subset.catalognumber == "0").count())
def to_int(s):
    """Parse `s` as an int, returning 0 when it cannot be parsed.

    0 is a terrible flag value, but it is graphable, so we can see
    how bad things are.
    """
    try:
        return int(s)
    except (TypeError, ValueError):
        # BUGFIX: catch only the conversion errors int() actually raises;
        # the previous bare `except:` also swallowed KeyboardInterrupt etc.
        return 0
# Wrap to_int as a Spark UDF returning IntegerType.
to_int_udf = sql.udf(to_int, types.IntegerType())
# Add a numeric view of the catalog number; unparseable values become 0.
catalognumbers = (subset
                  .withColumn("number", to_int_udf(subset.catalognumber))
                 )
# How many catalog numbers failed to parse (flag value 0)?
print(catalognumbers.where(catalognumbers.number == 0).count())
catalognumbers.where(catalognumbers.number == 0).head(10)
# Example of a non-numeric vs numeric catalog-number collision.
catalognumbers.where("catalognumber='r-70049' OR catalognumber='70049'").head(10)
# Pull the sorted numeric catalog numbers into pandas for plotting.
catalognumbers_pd = (catalognumbers
                     .select(catalognumbers.number)
                     .sort(catalognumbers.number)
                     .toPandas()
                    )
catalognumbers_pd.describe()
catalognumbers_pd[-10:]
%matplotlib inline
import matplotlib.pyplot as plt
# Rank-vs-value plot of catalog numbers (the 3 largest values are dropped).
plt.plot(catalognumbers_pd.index.values[:-3],
         catalognumbers_pd[:-3]["number"])
plt.axis([0, 2000000, 0, 2000000])
plt.xlabel("Rank of specimen record")
plt.ylabel("Numeric catalog number")
plt.title("Catalog numbers from Museum of Comparative Zoology,\nHarvard University (271a9ce9-c6d3-4b63-a722-cb0adc48863f)")
# Zoom in on two interesting regions of the curve.
x_start = 1418000
x_end = 1419000
y_start = 199000
y_end = 201000
plt.plot(catalognumbers_pd.index.values[x_start:x_end],
         catalognumbers_pd[x_start:x_end]["number"])
plt.axis([x_start, x_end, y_start, y_end])
x_start = 1850000
x_end = 1875000
y_start = 600000
y_end = 700000
plt.plot(catalognumbers_pd.index.values[x_start:x_end],
         catalognumbers_pd[x_start:x_end]["number"])
plt.axis([x_start, x_end, y_start, y_end])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To test our code, find one collection that seems to have numeric ids. Go to the search API and find the most common catalog number
Step2: Is there anything that is not numeric?
Step3: Well, there certainly is — and there are repeated catalog numbers too. We can't assume that 'r-70049' is the same as '70049', and even if we could, we couldn't guess at the catalog-numbering practices across collections.
Step4: Let's zoom in on some interesting parts.
|
2,592
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
# Subset of Amazon baby-product reviews with sentiment labels
# (+1 = positive, -1 = negative).  Python 2 notebook (print statements).
products = graphlab.SFrame('amazon_baby_subset.gl/')
products['sentiment']
products.head(10)['name']
print '# of positive reviews =', len(products[products['sentiment']==1])
print '# of negative reviews =', len(products[products['sentiment']==-1])
import json
with open('important_words.json', 'r') as f: # Reads the list of most frequent words
    important_words = json.load(f)
important_words = [str(s) for s in important_words]
print important_words
def remove_punctuation(text):
    # Python 2 str.translate(None, deletechars): strip all punctuation.
    import string
    return text.translate(None, string.punctuation)
products['review_clean'] = products['review'].apply(remove_punctuation)
# One count column per important word: occurrences of the word in each review.
for word in important_words:
    products[word] = products['review_clean'].apply(lambda s : s.split().count(word))
products['perfect']
# Number of reviews containing the word "perfect" at least once.
products['perfect'].apply(lambda i: 1 if i>=1 else 0).sum()
def get_numpy_data(data_sframe, features, label):
    """Convert an SFrame into (feature_matrix, label_array) NumPy arrays.

    Prepends a constant 'intercept' column (all ones) as the first feature.
    Note: mutates data_sframe by adding that 'intercept' column.
    """
    data_sframe['intercept'] = 1
    selected = ['intercept'] + features
    feature_matrix = data_sframe[selected].to_numpy()
    label_array = data_sframe[label].to_numpy()
    return (feature_matrix, label_array)
# Warning: This may take a few minutes...
# Build the design matrix (with intercept column) and the label vector.
feature_matrix, sentiment = get_numpy_data(products, important_words, 'sentiment')
feature_matrix.shape
sentiment
def predict_probability(feature_matrix, coefficients):
    """Produce the probabilistic estimate P(y_i = +1 | x_i, w) for every row.

    Each estimate is the logistic (sigmoid) of the row's score and lies in
    (0, 1).  Returns a 1-D NumPy array; the previous version built an
    equivalent Python list element by element with np.e ** (-x), which is
    both slower and less direct than the vectorized form.
    """
    # Score of each data point: dot product of its features with the weights.
    scores = np.dot(feature_matrix, coefficients)
    # Link function 1 / (1 + exp(-score)), applied to the whole score vector.
    predictions = 1. / (1. + np.exp(-scores))
    return predictions
# Sanity check predict_probability on a tiny 2x3 example with hand-computed
# scores and sigmoid values.
dummy_feature_matrix = np.array([[1.,2.,3.], [1.,-1.,-1]])
dummy_coefficients = np.array([1., 3., -1.])
correct_scores = np.array( [ 1.*1. + 2.*3. + 3.*(-1.), 1.*1. + (-1.)*3. + (-1.)*(-1.) ] )
correct_predictions = np.array( [ 1./(1+np.exp(-correct_scores[0])), 1./(1+np.exp(-correct_scores[1])) ] )
print 'The following outputs must match '
print '------------------------------------------------'
print 'correct_predictions =', correct_predictions
print 'output of predict_probability =', predict_probability(dummy_feature_matrix, dummy_coefficients)
def feature_derivative(errors, feature):
    """Derivative of the log likelihood with respect to a single coefficient.

    This is simply the inner product of the per-example errors
    (indicator - prediction) with the corresponding feature column.
    """
    return np.dot(errors, feature)
def compute_log_likelihood(feature_matrix, sentiment, coefficients):
    """Log likelihood of the observed labels under the current coefficients.

    Uses ll = sum_i (1[y_i = +1] - 1) * score_i - log(1 + exp(-score_i)),
    with an overflow guard for very negative scores.
    """
    indicator = (sentiment==+1)
    scores = np.dot(feature_matrix, coefficients)
    logexp = np.log(1. + np.exp(-scores))
    # Simple check to prevent overflow: where exp(-score) overflowed to inf,
    # log(1 + exp(-s)) ~= -s, so substitute -score directly.
    mask = np.isinf(logexp)
    logexp[mask] = -scores[mask]
    lp = np.sum((indicator-1)*scores - logexp)
    return lp
# Sanity check compute_log_likelihood against a hand-computed value on the
# same tiny 2x3 example.
dummy_feature_matrix = np.array([[1.,2.,3.], [1.,-1.,-1]])
dummy_coefficients = np.array([1., 3., -1.])
dummy_sentiment = np.array([-1, 1])
correct_indicators = np.array( [ -1==+1, 1==+1 ] )
correct_scores = np.array( [ 1.*1. + 2.*3. + 3.*(-1.), 1.*1. + (-1.)*3. + (-1.)*(-1.) ] )
correct_first_term = np.array( [ (correct_indicators[0]-1)*correct_scores[0], (correct_indicators[1]-1)*correct_scores[1] ] )
correct_second_term = np.array( [ np.log(1. + np.exp(-correct_scores[0])), np.log(1. + np.exp(-correct_scores[1])) ] )
correct_ll = sum( [ correct_first_term[0]-correct_second_term[0], correct_first_term[1]-correct_second_term[1] ] )
print 'The following outputs must match '
print '------------------------------------------------'
print 'correct_log_likelihood =', correct_ll
print 'output of compute_log_likelihood =', compute_log_likelihood(dummy_feature_matrix, dummy_sentiment, dummy_coefficients)
from math import sqrt
def logistic_regression(feature_matrix, sentiment, initial_coefficients, step_size, max_iter):
    """Fit logistic-regression coefficients by full-batch gradient ascent.

    Runs max_iter iterations, updating each coefficient in place from the
    shared error vector, and prints the log likelihood on a logarithmically
    thinning schedule so progress can be monitored.  Returns the fitted
    coefficient vector.  (Python 2: xrange / print statements.)
    """
    coefficients = np.array(initial_coefficients) # make sure it's a numpy array
    for itr in xrange(max_iter):
        # Predict P(y_i = +1 | x_i, w) with the current coefficients.
        predictions = predict_probability(feature_matrix, coefficients)
        # Compute indicator value for (y_i = +1)
        indicator = (sentiment==+1)
        # Compute the errors as indicator - predictions
        errors = indicator - predictions
        for j in xrange(len(coefficients)): # loop over each coefficient
            # feature_matrix[:,j] is the feature column associated with coefficients[j].
            derivative = feature_derivative(errors, feature_matrix[:, j])
            # Gradient ASCENT: add step_size times the derivative.
            coefficients[j] += derivative * step_size
        # Checking whether log likelihood is increasing
        if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \
        or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:
            lp = compute_log_likelihood(feature_matrix, sentiment, coefficients)
            print 'iteration %*d: log likelihood of observed labels = %.8f' % \
                (int(np.ceil(np.log10(max_iter))), itr, lp)
    return coefficients
# Train on the full dataset: 193 word features + intercept = 194 coefficients.
coefficients = logistic_regression(feature_matrix, sentiment, initial_coefficients=np.zeros(194),
                                   step_size=1e-7, max_iter=301)
# Compute the scores as a dot product between feature_matrix and coefficients.
scores = np.dot(feature_matrix, coefficients)
# Count how many predictions are positive (score > 0 => class +1).
probs = [1 if x > 0 else 0 for x in scores]
np.sum(probs)
print coefficients
# Number of actually-positive labels, for comparison.
np.sum([1 if x > 0 else 0 for x in sentiment])
# Class prediction: +1 when score > 0, else -1.
predict_labels = [1 if x > 0 else -1 for x in scores]
num_mistakes = np.sum(sentiment != predict_labels)
accuracy = 1- num_mistakes * 1.0 / len(sentiment)
print "-----------------------------------------------------"
print '# Reviews correctly classified =', len(products) - num_mistakes
print '# Reviews incorrectly classified =', num_mistakes
print '# Reviews total =', len(products)
print "-----------------------------------------------------"
print 'Accuracy = %.2f' % accuracy
# Rank words by learned coefficient (most positive first); intercept excluded.
coefficients = list(coefficients[1:]) # exclude intercept
word_coefficient_tuples = [(word, coefficient) for word, coefficient in zip(important_words, coefficients)]
word_coefficient_tuples = sorted(word_coefficient_tuples, key=lambda x:x[1], reverse=True)
word_coefficient_tuples[:11]
word_coefficient_tuples[-10:]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load review dataset
Step2: One column of this dataset is 'sentiment', corresponding to the class label with +1 indicating a review with positive sentiment and -1 indicating one with negative sentiment.
Step3: Let us quickly explore more of this dataset. The 'name' column indicates the name of the product. Here we list the first 10 products in the dataset. We then count the number of positive and negative reviews.
Step4: Note
Step5: Now, we will perform 2 simple data transformations
Step6: Now we proceed with Step 2. For each word in important_words, we compute a count for the number of times the word occurs in the review. We will store this count in a separate column (one for each word). The result of this feature processing is a single column for each word in important_words which keeps a count of the number of times the respective word occurs in the review text.
Step7: The SFrame products now contains one column for each of the 193 important_words. As an example, the column perfect contains a count of the number of times the word perfect occurs in each of the reviews.
Step8: Now, write some code to compute the number of product reviews that contain the word perfect.
Step9: Quiz Question. How many reviews contain the word perfect?
Step10: We now provide you with a function that extracts columns from an SFrame and converts them into a NumPy array. Two arrays are returned
Step11: Let us convert the data into NumPy arrays.
Step12: Quiz Question
Step13: Estimating conditional probability with link function
Step14: Aside. How the link function works with matrix algebra
Step15: Compute derivative of log likelihood with respect to a single coefficient
Step16: In the main lecture, our focus was on the likelihood. In the advanced optional video, however, we introduced a transformation of this likelihood---called the log likelihood---that simplifies the derivation of the gradient and is more numerically stable. Due to its numerical stability, we will use the log likelihood instead of the likelihood to assess the algorithm.
Step17: Checkpoint
Step18: Taking gradient steps
Step19: Now, let us run the logistic regression solver.
Step20: Quiz question
Step21: Now, complete the following code block for Step 2 to compute the class predictions using the scores obtained above
Step22: Quiz question
Step23: Measuring accuracy
Step24: Quiz question
Step25: Now, word_coefficient_tuples contains a sorted list of (word, coefficient_value) tuples. The first 10 elements in this list correspond to the words that are most positive.
Step26: Quiz question
|
2,593
|
<ASSISTANT_TASK:>
Python Code:
# Load pickled data
import pickle
import tensorflow as tf
import numpy as np
# TODO: Fill this in based on where you saved the training and testing data
training_file = 'traffic-signs-data/train.p'
validation_file= 'traffic-signs-data/valid.p'
testing_file = 'traffic-signs-data/test.p'
with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(validation_file, mode='rb') as f:
    valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)
# Each pickle holds 'features' (image arrays; 32x32x3 per the model input
# placeholder below) and 'labels' (integer class ids).
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
# TODO: Number of training examples
n_train = X_train.shape[0]
# TODO: Number of validation examples
n_validation = X_valid.shape[0]
# TODO: Number of testing examples.
n_test = X_test.shape[0]
# TODO: What's the shape of an traffic sign image?
image_shape = X_train.shape
IMG_W, IMG_H = image_shape[1], image_shape[2]
# TODO: How many unique classes/labels there are in the dataset (union of
# the labels seen in all three splits).
classes = np.array(list(set(list(y_train)+list(y_valid)+list(y_test))))
n_classes = len(classes)
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
import matplotlib.pyplot as plt
%matplotlib inline
# Class-frequency histograms for the three splits.
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3)
bins = range(n_classes + 1)
ax1.hist(y_train, bins=bins)
ax1.set_title('Train dist', fontsize=12)
ax2.hist(y_test, bins=bins)
ax2.set_title('Test dist', fontsize=12)
ax3.hist(y_valid, bins=bins)
ax3.set_title('Valid dist', fontsize=12)
plt.tight_layout()
# Function to generate transformed images
# from https://nbviewer.jupyter.org/github/vxy10/SCND_notebooks/blob/master/preprocessing_stuff/img_transform_NB.ipynb
import random
import cv2
from IPython.core.display import Image, display
def transform_image(img, ang_range, trans_range):
    """Randomly rotate and translate `img` to synthesize a new training image.

    ang_range:   width of the rotation-angle interval (degrees), centred on 0.
    trans_range: width of the translation interval (pixels), centred on 0.
    Transform parameters are drawn from uniform distributions.
    """
    # Random rotation about the image centre.
    angle = np.random.uniform(ang_range) - ang_range / 2
    rows, cols, _ = img.shape
    rotation = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
    # Random shift in x and y.
    dx = trans_range * np.random.uniform() - trans_range / 2
    dy = trans_range * np.random.uniform() - trans_range / 2
    translation = np.float32([[1, 0, dx], [0, 1, dy]])
    # Apply rotation first, then translation.
    warped = cv2.warpAffine(img, rotation, (cols, rows))
    warped = cv2.warpAffine(warped, translation, (cols, rows))
    return warped
# Augment the training set so every class has at least `min_count` examples.
classes, counts = np.unique(y_train, return_counts=True)
min_count = 1200
for klass in classes:
    klass_indices = np.where(y_train == klass)[0]
    # Show one random example of the class.
    img = X_train[random.choice(klass_indices)]
    fig, ax1 = plt.subplots(nrows=1, ncols=1)
    ax1.set_xlabel(str(klass))
    ax1.tick_params(labelbottom='off')
    ax1.imshow(img)
    if counts[klass] >= min_count:
        continue
    # Generate jittered copies (random rotation/translation) of random class
    # members until the class reaches min_count examples.
    n_add = min_count - counts[klass]
    klass = klass.reshape([1])
    tmp_x = []
    tmp_y = []
    for num in range(n_add):
        img = X_train[random.choice(klass_indices)]
        orig = img
        img = transform_image(img,15,5)
        img = img.reshape([1, 32, 32, 3])
        tmp_x.append(img)
        tmp_y.append(klass)
    tmp_x = np.concatenate(tmp_x)
    tmp_y = np.concatenate(tmp_y)
    X_train = np.concatenate([X_train, tmp_x])
    y_train = np.concatenate([y_train, tmp_y])
%matplotlib inline
# Class distribution after augmentation (every class now >= min_count).
fig, ax1 = plt.subplots(nrows=1, ncols=1)
bins = range(n_classes + 1)
ax1.hist(y_train, bins=bins)
ax1.set_title('Train dist', fontsize=12)
### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include
### converting to grayscale, etc.
### Feel free to use as many code cells as needed.
from skimage.color import rgb2gray
from skimage import exposure
import random
def preprocess(d):
    """Grayscale + histogram-equalize each image, then standardize the batch.

    Each RGB image is converted to grayscale, contrast-equalized, stacked
    back to 3 identical channels, and the whole batch is then made
    zero-mean / unit-std.  Returns the normalized array.
    """
    def _equalize(img):
        # Per-image contrast normalisation.
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        gray = cv2.equalizeHist(gray)
        return np.dstack((gray, gray, gray))

    data = np.array([_equalize(img) for img in d])
    data = data - np.mean(data)
    return data / np.std(data)
# Apply the same preprocessing to all three splits.
X_train = preprocess(X_train)
X_valid = preprocess(X_valid)
X_test = preprocess(X_test)
# Show a few randomly chosen preprocessed training images.
n_disp = 8
fig, axes = plt.subplots(nrows=1, ncols=n_disp)
for i in range(n_disp):
    axes[i].imshow(X_train[random.randint(0, len(X_train) - 1)])
from tensorflow.contrib.layers import flatten
from sklearn.utils import shuffle
def model_lenet(x):
    """LeNet-style classifier: two conv+pool stages, then three FC layers.

    x: 32x32x3 image batch placeholder.  Returns unscaled logits over the
    43 traffic-sign classes.  Uses the module-level `keep_prob` placeholder
    for dropout on the fully connected layers.
    """
    # Hyperparameters for the truncated-normal weight initialisation.
    mu = 0
    sigma = 0.1
    # Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6), mean = mu, stddev = sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
    # Activation.
    conv1 = tf.nn.relu(conv1)
    # Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
    # Activation.
    conv2 = tf.nn.relu(conv2)
    # Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)
    # Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b
    # Activation + dropout (keep_prob fed at run time; 1.0 for evaluation).
    fc1 = tf.nn.relu(fc1)
    fc1 = tf.nn.dropout(fc1, keep_prob)
    # Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b
    # Activation + dropout.
    fc2 = tf.nn.relu(fc2)
    fc2 = tf.nn.dropout(fc2, keep_prob)
    # Layer 5: Fully Connected. Input = 84. Output = 43 (class logits).
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
    fc3_b = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    return logits
# Graph inputs: image batch, integer labels, dropout keep probability.
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
keep_prob = tf.placeholder(tf.float32)
one_hot_y = tf.one_hot(y, 43)
# Training hyperparameters.
rate = 0.001
EPOCHS = 100
BATCH_SIZE = 128
# Loss (softmax cross-entropy), Adam optimizer, and accuracy ops.
logits = model_lenet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
predict = tf.nn.softmax(logits)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    """Compute accuracy over a dataset in BATCH_SIZE chunks.

    Uses the default TF session; dropout is disabled (keep_prob=1.0).
    Returns (accuracy, list_of_label_batches).
    """
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    all_y = []
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        all_y.append(batch_y)
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})
        # Weight each batch's accuracy by its (possibly smaller last) batch size.
        total_accuracy += (accuracy * len(batch_x))
    return (total_accuracy / num_examples), all_y
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
with tf.Session() as sess:
    # NOTE(review): tf.initialize_all_variables() is deprecated in newer TF 1.x
    # in favour of tf.global_variables_initializer() -- confirm the TF
    # version in use before changing it.
    sess.run(tf.initialize_all_variables())
    num_examples = len(X_train)
    print("Training...")
    print()
    for i in range(EPOCHS):
        # Shuffle each epoch so batches differ between epochs.
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            # Dropout active during training (keep 70% of FC activations).
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.7})
        validation_accuracy, _ = evaluate(X_valid, y_valid)
        if i%10 == 0:
            print("EPOCH {} ...".format(i+1))
            print("Validation Accuracy = {:.3f}".format(validation_accuracy))
            print()
    saver.save(sess, 'model')
    print("Model saved")
    # Evaluate on the test data
    test_accuracy, _ = evaluate(X_test, y_test)
    print("Test accuracy = {:.3f}".format(test_accuracy))
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
import os
import matplotlib.image as mpimg
def load_extra_images(folder):
    """Load every image in `folder` (sorted by file name), resized to 32x32.

    Files whose read yields no image are skipped instead of crashing.
    """
    images = []
    for fname in sorted(os.listdir(folder)):
        img = mpimg.imread(os.path.join(folder, fname))
        # BUGFIX: check for a failed read *before* resizing; the original
        # resized first, so a None image would have crashed cv2.resize and
        # the None-check afterwards could never trigger.
        if img is None:
            continue
        images.append(cv2.resize(img, (32, 32)))
    return images
# Load the hand-collected extra sign images and display them.
images = load_extra_images("/home/dman/CarND-Traffic-Sign-Classifier-Project/extra_signs")
fig, axes = plt.subplots(nrows=1, ncols=len(images))
for i in range(len(images)):
    axes[i].imshow(images[i])
# Stack the images into a single (N, 32, 32, 3) batch.
# NOTE(review): these images are NOT passed through preprocess() like the
# training data -- confirm this is intended before evaluating.
X_extra = np.concatenate([img.reshape([1, 32, 32, 3]) for img in images])
# Hand-assigned true class ids for the six extra images.
y_extra = np.array([28, 12, 14, 3, 25, 13])
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed
with tf.Session() as sess:
    # Restore the best checkpointed weights.
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    extra_prediction_softmax = tf.nn.softmax(logits)
    # BUGFIX: top_k must be taken over the softmax tensor defined above; the
    # previous code referenced an undefined name `extra_classes` (NameError).
    top_k_ = tf.nn.top_k(extra_prediction_softmax, k=5, sorted=True)
    top_k = sess.run(top_k_, feed_dict={x: X_extra, y: y_extra, keep_prob: 1.0})
    # Top-1 predicted class index for each extra image.
    pred = np.array([row[0] for row in top_k[1]])
    print(pred)
### Calculate the accuracy for these 5 new images.
### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.
# Top-1 accuracy over the downloaded images (y_extra holds the true labels).
arr = np.array([x[0] for x in top_k[1]])
print('accuracy is %s' % (np.sum(arr == y_extra)/float(len(arr))))
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
### Feel free to use as many code cells as needed.
# top_k[0] = probabilities, top_k[1] = class indices, one row per image.
for i in range(len(top_k[0])):
    print('Image', i, '\n\tprobabilities:', top_k[0][i], '\n\tclasses:', top_k[1][i])
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1, plt_num=1):
    """Visualise the feature maps a conv layer produces for one input batch.

    image_input:   batch fed to the network's `x` placeholder (preprocess it
                   the same way the network expects).
    tf_activation: TF tensor (a weight-layer output) to visualise.
    activation_min/activation_max: optional fixed colour-scale limits
                   (-1 means "use matplotlib's automatic scaling").
    plt_num:       matplotlib figure number, so several maps can coexist.
    """
    # Note: x must be the network's input placeholder; if tf_activation is not
    # defined here it may be out of scope inside this function.
    activation = tf_activation.eval(session=sess, feed_dict={x: image_input})
    featuremaps = activation.shape[3]
    plt.figure(plt_num, figsize=(15, 15))
    for featuremap in range(featuremaps):
        plt.subplot(6, 8, featuremap + 1)  # feature maps laid out on a 6x8 grid
        plt.title('FeatureMap ' + str(featuremap))  # displays the feature map number
        # BUGFIX: the original tests used the bitwise `&` between comparisons
        # ("a != -1 & b != -1"), which Python parses as the chained comparison
        # a != (-1 & b) != -1 because `&` binds tighter than `!=`; use the
        # logical `and` instead.
        if activation_min != -1 and activation_max != -1:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmin=activation_min, vmax=activation_max, cmap="gray")
        elif activation_max != -1:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
        elif activation_min != -1:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
        else:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", cmap="gray")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Include an exploratory visualization of the dataset
Step3: Step 2
Step4: Model Architecture
Step5: Train, Validate and Test the Model
Step6: Step 3
Step7: Predict the Sign Type for Each Image
Step8: Analyze Performance
Step9: Output Top 5 Softmax Probabilities For Each Image Found on the Web
Step10: Project Writeup
|
2,594
|
<ASSISTANT_TASK:>
Python Code:
from lxml import etree
# Parse the sample XML document into an ElementTree.
tree = etree.parse("data/books.xml")
print(tree)
# Serialize back to bytes; .decode() gives a printable str and
# pretty_print=True adds indentation.
print(etree.tostring(tree))
print(etree.tostring(tree).decode())
print(etree.tostring(tree, pretty_print=True).decode())
# iterfind("//book") matches <book> elements anywhere in the document;
# "/catalog/book" matches only along the absolute path from the root.
print(len(list(tree.iterfind("//book"))))
for node in tree.iterfind("//book"):
    print(node)
print(len(list(tree.iterfind("/catalog"))))
for node in tree.iterfind("/catalog/book"):
    print(node)
# "/book" finds nothing here: the root element is not <book>.
for node in tree.iterfind("/book"):
    print(node.tag)
for node in tree.iterfind("//book"):
    print(node.text)
# Root element: its tag name and attributes.
root_node = tree.getroot()
print(root_node.tag)
print(root_node.attrib["author"])
print(root_node.attrib["year"])
for key in root_node.attrib.keys():
    print(root_node.attrib[key])
# Children of the root: len() counts them, iteration yields them in order.
print(len(root_node))
for node in root_node:
    print(node.tag)
# Concatenate the text of each child; "volta" elements are flagged instead.
for node in root_node:
    if node.tag != "volta":
        line_text = ""
        for text in node.itertext():
            line_text = line_text + text
        print(line_text)
    else:
        print("=== Volta found! ===")
# Print the "n" (line number) attribute of every <line> child.
for node in root_node:
    if node.tag == "line":
        print(node.attrib["n"])
# Mutate attributes on the root element (attribute values must be strings).
root_node = tree.getroot()
root_node.attrib["author"] = "J.K. Rowling"
root_node.attrib["year"] = "2015"
root_node.attrib["new_element"] = "dummy string!"
root_node.attrib["place"] = "maynooth"
print(etree.tostring(root_node).decode())
root_node.attrib["year"] = "2015"
# Create a free-standing <break> element and give it an attribute.
break_el = etree.Element("break")
break_el.attrib["author"] = "Mike"
print(etree.tostring(break_el).decode())
# Append a fresh <break/> child to every <line> element in the tree.
for node in tree.iterfind("//line"):
    break_el = etree.Element("break")
    node.append(break_el)
print(etree.tostring(tree).decode())
# Element text content lives in the .text attribute.
break_el = etree.Element("break")
print(etree.tostring(break_el).decode())
break_el.text = "XXX"
print(etree.tostring(break_el).decode())
# Re-parse sonnet18 and append to each <line> a <break n="..."> element
# carrying the line's own "n" attribute.
tree = etree.parse("data/TEI/sonnet18.xml")
root_node = tree.getroot()
for node in root_node:
    if node.tag == "line":
        v = node.attrib["n"]
        break_el = etree.Element("break")
        break_el.attrib["n"] = v
        node.append(break_el)
print(etree.tostring(tree).decode())
# Parse sonnet17 (exercise: add your own parsing code below).
tree = etree.parse("data/TEI/sonnet17.xml")
print(etree.tostring(tree).decode())
# add your parsing code here...
# List all XML files in the French-plays corpus directory.
import os
dirname = "data/TEI/french_plays/"
for filename in os.listdir(dirname):
    if filename.endswith(".xml"):
        print(filename)
# Walk a directory of XML files and print each file's author and title.
import os
dirname = "code/data/"
for filename in os.listdir(dirname):
    if filename.endswith(".xml"):
        print("*****")
        print("\t-", filename)
        # BUG FIX: the original called ET.parse(), but no name `ET` exists
        # in this notebook -- lxml is imported as `etree`.  Also join the
        # path components with os.path.join() instead of raw concatenation.
        tree = etree.parse(os.path.join(dirname, filename))
        # find() returns only the FIRST match; iterfind() would yield all.
        author_element = tree.find("//author")  # find vs iterfind!
        print("\t-", author_element.text)
        title_element = tree.find("//title")
        print("\t-", title_element.text)
# your code goes here
# from IPython.core.display import HTML
# def css_styling():
# styles = open("styles/custom.css", "r").read()
# return HTML(styles)
# css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For the record, we should mention that there exist many other libraries in Python to parse XML, such as minidom or BeautifulSoup which is an interesting library, when you intend to scrape data from the web. While these might come with more advanced bells and whistles than lxml, they can also be more complex to use, which is why we stick to lxml in this course. Let us now import our sonnet in Python, which has been saved in the file sonnet18.xml
Step2: Python has now read and parsed our xml-file via the etree.parse() function. We have stored our XML tree structure, which is returned by the parse() function, in the tree variable, so that we can access it later. If we print tree as such, we don't get a lot of useful information. To have a closer look at the XML in a printable text version, we need to call the tostring() method on the tree before printing it.
Step3: You'll notice that we actually get a string in a raw format
Step4: If we have more complex data, it might also be useful to set the pretty_print parameter to True, to obtain a more beautifully formatted string, with Python taking care of indentation etc. In our example, it doesn't change much
Step5: Now let us start processing the contents of our file. Suppose that we are not really interested in the full hierarchical structure of our file, but just in the rhyme words occurring in it. The high-level function iterfind() allows us to easily select all rhyme-elements in our tree, regardless of where exactly they occur. Because this function returns a list of nodes, we can simply loop over them
Step6: Note that the search expression ("//rhyme") has two forward slashes before our actual search term. This is in fact XPath syntax, and the two slashes indicate that the search term can occur anywhere (e.g. not necessarily among a node's direct children). Unfortunately, printing the nodes themselves again isn't really insightful
Step7: To extract the actual rhyme word contained in the element, we can use the .text property of the nodes
Step8: That looks better!
Step9: We can access the value of the attributes of an element via .attrib, just like we would access the information in a Python dictionary, that is via key-based indexing. We know that our sonnet element, for instance, should have an author and year attribute. We can inspect the value of these as follows
Step10: If we wouldn't know which attributes were in fact available for a node, we could also retrieve the attribute names by calling keys() on the attributes property of a node, just like we would do with a regular dictionary
Step11: So far so good. Now that we have selected our root element, we can start drilling down our tree's structure. Let us first find out how many child nodes our root element has
Step12: Our root node turns out to have 15 child nodes, which makes a lot of sense, since we have 14 line elements and the volta. We can actually loop over these children, just as we would loop over any other list
Step13: To extract the actual text in our lines, we need one additional for-loop which will allow us to iterate over the pieces of text under each line
Step14: Note that we get an empty line at the volta, since there isn't any actual text associated with this empty tag.
Step15: Manipulating XML in Python
Step16: That was easy, wasn't it? Did you see we can just add new attributes to an element? Just take care only to put strings as attribute values
Step17: Adding whole elements is fairly easy too. Let's add a single dummy element (<break/>) to indicate a line break at the end of each line. Importantly, we have to create this element inside our loop, before we can add it
Step18: You'll notice that we actually created an empty <break/> tag. Now, let's add it add the end of each line
Step19: Adding an element with actual content is just as easy by the way
Step20: Quiz
Step21: Python for TEI
Step22: Quiz
Step23: A hands-on case study
Step24: OK
Step25: As you can see, we have made you a nice subset selection of this data, containing only texts by the famous pair of brothers
Step26:
|
2,595
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import sys
import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
from io import StringIO
print(sys.version)
print("Pandas:", pd.__version__)
# Load the transect observations; everything is treated as text afterwards.
df = pd.read_csv('C:/Users/Peter/Documents/atlas/atlasdata/obs_types/transect.csv', parse_dates=['date'])
df = df.astype(dtype='str')# we don't need numbers in this dataset.
df=df.replace('nan','')  # astype('str') turned missing values into the string 'nan'
#this turns dates into strings with the proper format for JSON:
#df['date'] = df['date'].dt.strftime('%Y-%m-%d')
# Normalize the observation types (patterns are regexes, hence the escaped *).
df.type = df.type.str.replace('\*remonitoring notes','transect')
df.type = df.type.str.replace('\*plot summary','transect')
# Copy URLs into the photo column that matches each row's type.
df.loc[df.type =='map',['mapPhoto']]=df['url'] #moving cell values to correct column
df.loc[df.type.str.contains('lineminus'),['miscPhoto']]=df['url']
df.loc[df.type.str.contains('lineplus'),['miscPhoto']]=df['url']
df.loc[df.type.str.contains('misc'),['miscPhoto']]=df['url']
#now to deal with type='photo'
photos = df[df.type=='photo']
nonphotos = df[df.type != 'photo'] #we can concatenate these later
grouped = photos.groupby(['id','date'])
photos.shape
# For each (id, date) group, relabel the third photo row as 'misc'.
# NOTE(review): value[2] assumes every group has at least three rows -- confirm.
values=grouped.groups.values()
for value in values:
    photos.loc[value[2],['type']] = 'misc'
    #photos.loc[value[1],['type']] = 'linephoto2'
photos.loc[photos.type=='linephoto1']
# Inspect each (id, date) group of photo rows.
for name, group in grouped:
    # BUG FIX: the original printed `grouped[name]`, which indexes the
    # GroupBy object by COLUMN label (a KeyError for an (id, date) group
    # key); the loop already yields the group's rows as `group`.
    print(name)
    print(group)
# Explore the photo rows through a hierarchical (id, date) index.
photos = df[df.type == 'photo']
photos.set_index(['id','date'],inplace=True)
photos.index[1]
photos=df[df.type=='photo']
photos.groupby(['id','date']).count()
photos.loc[photos.index[25],['type','note']]
#combine photo captions
df['caption']=''
df.loc[(df.type.str.contains('lineminus'))|(df.type.str.contains('lineplus')),['caption']]=df['type'] + ' | ' + df['note']
# NOTE(review): the next two lines overwrite the caption just built with the
# raw URL -- looks like a copy/paste from the photo-column assignments; verify.
df.loc[df.type.str.contains('lineplus'),['caption']]=df['url']
df.loc[df.type.str.contains('misc'),['caption']]=df['url']
# Assemble a pipe-separated site description for transect rows.
df['mystart'] = 'Baseline summary:'
df.loc[df.type =='transect',['site_description']]= df[['mystart','label1','value1','label2','value2','label3','value3','note']].apply(' | '.join, axis=1)
# Route line photos to linephoto1/linephoto2 (patterns are regexes).
df.loc[df.type.str.contains('line-'),['linephoto1']]=df['url']
df.loc[df.type.str.contains('line\+'),['linephoto2']]=df['url']#be sure to escape the +
df.loc[df.type.str.contains('linephoto1'),['linephoto1']]=df['url']
df.loc[df.type.str.contains('linephoto2'),['linephoto2']]=df['url']
df.loc[df.type == 'plants',['general_observations']]=df['note']
#since we're using string methods, NaNs won't work
mycols =['general_observations','mapPhoto','linephoto1','linephoto2','miscPhoto','site_description']
for item in mycols:
    df[item] = df[item].fillna('')
# Broadcast the single non-empty value in each group across all of its rows
# by joining the group's strings (only one is non-empty per group).
df.mapPhoto = df.groupby('id')['mapPhoto'].transform(lambda x: "%s" % ''.join(x))
df.linephoto1 = df.groupby(['id','date'])['linephoto1'].transform(lambda x: "%s" % ''.join(x))
df.linephoto2 = df.groupby(['id','date'])['linephoto2'].transform(lambda x: "%s" % ''.join(x))
df.miscPhoto = df.groupby(['id','date'])['miscPhoto'].transform(lambda x: "%s" % ''.join(x))
df['site_description'] = df['site_description'].str.strip()
df.to_csv('test.csv')
#done to here. Next, figure out what to do with linephotos, unclassified photos, and their notes.
#make column for photocaptions. When adding linephoto1, add 'note' and 'type' fields to caption column. E.g. 'linephoto1: 100line- | view east along transect.' Then join the rows in the groupby transform and add to site_description field.
df.shape
# Sanity check: line+ rows whose joined linephoto2 string is suspiciously short.
df[(df.type.str.contains('line\+'))&(df.linephoto2.str.len()<50)]
# NOTE(review): `maps` is never defined in this notebook as shown -- this
# line will raise NameError; confirm which series it was meant to inspect.
maps.str.len().sort_values()
ids = list(df['id'])#make a list of ids to iterate over, before the hierarchical index
#df.type = df.type.map({'\*plot summary':'transect','\*remonitoring notes':'transect'})
df.loc[df.type =='map',['mapPhoto']]=df['url'] #moving cell values to correct column
df.set_index(['id','type'],inplace=True) # hierarchical index so we can call locations
#a hierarchical index uses a tuple. You can set values using loc.
#this format: df.loc[destination] = df.loc[source].values[0]
# Copy each id's map URL onto its '*plot summary' row via the MultiIndex.
for item in ids:
    df.loc[(item,'*plot summary'),'mapPhoto'] = df.loc[(item,'map'),'mapPhoto'].values[0]
    #generates a pink warning about performance, but oh well.
#here we are using an expression in parens to test for a condition
(df['type'].str.contains('\s') & df['note'].notnull()).value_counts()
df.url = df.url.str.replace(' ','_');df.url
df.url.head()
# NOTE(review): this assigns the *unbound method* str.replace itself (no
# call, no arguments) to every row -- almost certainly not what was intended.
df['newurl'] = df.url.str.replace
df.newurl.head()
#for combining rows try something like this:
print(df.groupby('somecolumn')['temp variable'].apply(' '.join).reset_index())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: shift data to correct column
Step2: use groupby and transform to fill the row
Step3: shift data to correct row using a multi-Index
|
2,596
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Notebook styling and test images for the spatial-frequency examples.
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
import matplotlib.image as mpimg
from IPython.display import HTML
HTML('../style/code_toggle.html')
#soccer = mpimg.imread('figures/WLA_moma_Umberto_Boccioni_Dynamism_of_a_Soccer_Player_1913_512.png')
# Load the two RGB(A) example images used throughout this section.
cyclist = mpimg.imread('figures/Umberto_Boccioni_Dynamism_of_a_Cyclist_512.png')
duck = mpimg.imread('figures/Anas_platyrhynchos_male_female_quadrat_512.png')
def rgb2gray(rgb):
    """Collapse an RGB image of shape (H, W, 3+) to a (H, W) grayscale array.

    Applies the standard luma weights to the first three channels.
    """
    red_plane = rgb[:, :, 0]
    green_plane = rgb[:, :, 1]
    blue_plane = rgb[:, :, 2]
    # Weighted sum of the colour planes (standard grayscale conversion).
    return 0.2989 * red_plane + 0.5870 * green_plane + 0.1140 * blue_plane
# Convert both images to grayscale and display them in the spatial domain.
gCyclist = rgb2gray(cyclist)
gDuck = rgb2gray(duck)
fig = plt.figure(figsize=(8,8))
plt.title('Dynamism of a Cyclist')
img0plot = plt.imshow(gCyclist)
img0plot.set_cmap('gray')
#fig.savefig("cyclist_gray.png", bbox_inches='tight', pad_inches=0)
fig = plt.figure(figsize=(8,8))
plt.title('A Duck')
img1plot = plt.imshow(gDuck)
img1plot.set_cmap('gray')
#fig.savefig("duck_gray.png", bbox_inches='tight', pad_inches=0)
# 2D FFT of each image; fftshift moves the zero frequency to the centre.
fftCyclist = np.fft.fftshift(np.fft.fft2(gCyclist))
fig, axes = plt.subplots(figsize=(16,8))
plt.suptitle('The Fourier Transform of \'Dynamism of a Cyclist\'')
plt.subplot(1,2,1)
plt.imshow( 10. * np.log10(np.abs(fftCyclist))) #amplitude (decibels)
plt.subplot(1,2,2)
plt.imshow( np.angle(fftCyclist)) #phase
#fig.savefig("soccer_fft_gray.png", bbox_inches='tight', pad_inches=0)
fftDuck = np.fft.fftshift(np.fft.fft2(gDuck))
fig, axes = plt.subplots(figsize=(16,8))
plt.suptitle('The Fourier Transform of a Duck')
plt.subplot(1,2,1)
plt.imshow( 10. * np.log10(np.abs(fftDuck))) #amplitude (decibels)
plt.subplot(1,2,2)
plt.imshow(np.angle(fftDuck)) #phase
#fig.savefig("duck_fft_gray.png", bbox_inches='tight', pad_inches=0)
# Hybrid image 1: amplitude from the painting, phase from the duck.
fig = plt.figure(figsize=(8,8))
plt.title('Hybrid image: amplitude (painting), phase (duck)')
phs = np.angle(fftDuck) #phase of the duck
amp = np.abs(fftCyclist) #amplitude of the painting
fftHybrid = amp * (np.cos(phs) + 1j * np.sin(phs)) #rebuild complex values from amplitude and phase
hybrid = np.abs(np.fft.ifft2(np.fft.fftshift(fftHybrid))) #compute the inverse Fourier Transform
hybridPlot = plt.imshow(hybrid)
hybridPlot.set_cmap('gray')
#fig.savefig("hybrid_phs_duck_amp_cyclist.png", bbox_inches='tight', pad_inches=0)
# Hybrid image 2: the reverse combination.
fig = plt.figure(figsize=(8,8))
plt.title('Hybrid image: amplitude (duck), phase (painting)')
phs = np.angle(fftCyclist) #phase of the painting
amp = np.abs(fftDuck) #amplitude of the duck
fftHybrid = amp * (np.cos(phs) + 1j * np.sin(phs)) #rebuild complex values from amplitude and phase
hybrid = np.abs(np.fft.ifft2(np.fft.fftshift(fftHybrid))) #compute the inverse Fourier Transform
hybridPlot = plt.imshow(hybrid)
hybridPlot.set_cmap('gray')
#fig.savefig("hybrid_phs_soccer_amp_duck.png", bbox_inches='tight', pad_inches=0)
# Phase-only reconstruction: keep the phase, flatten the amplitude to 1.
fig = plt.figure(figsize=(8,8))
plt.title('Duck (phase-only)')
phs = np.angle(fftDuck)
amp = 1.*np.ones_like(fftDuck) #set all the amplitude values to 1
fftPhsImg0 = amp * (np.cos(phs) + 1j * np.sin(phs))
phsImg0 = np.abs(np.fft.ifft2(np.fft.fftshift(fftPhsImg0)))
phsImg0Plot = plt.imshow(phsImg0)
phsImg0Plot.set_cmap('gray')
#fig.savefig("phs_only_duck.png", bbox_inches='tight', pad_inches=0)
# Amplitude-only reconstruction: discard the phase entirely.
fig, axes = plt.subplots(figsize=(16,8))
plt.title('Duck (amplitude-only)')
phs = np.zeros_like(fftDuck) #set the phase information to 0
amp = np.abs(fftDuck)
fftAmpImg0 = amp
plt.subplot(1,2,1)
plt.title('Duck (amp-only)')
ampImg0 = np.abs(np.fft.fftshift(np.fft.ifft2(fftAmpImg0)))
ampImg0Plot = plt.imshow(ampImg0)
ampImg0Plot.set_cmap('gray')
plt.subplot(1,2,2)
plt.title('Duck (amp-only (dB))')
ampImg0deci = 10.*np.log10(np.abs(np.fft.fftshift(np.fft.ifft2(fftAmpImg0))))
ampImg0deciPlot = plt.imshow(ampImg0deci)
ampImg0deciPlot.set_cmap('gray')
#fig.savefig("amp_only_duck.png", bbox_inches='tight', pad_inches=0)
# Reconstructions from only the real or only the imaginary component.
fig, axes = plt.subplots(figsize=(16,8))
plt.subplot(1,2,1)
plt.title('Duck (real-only)')
fftRealImg1 = fftDuck.real
realImg1 = np.abs(np.fft.ifft2(fftRealImg1))
plt.imshow(realImg1)
plt.set_cmap('gray')
plt.subplot(1,2,2)
plt.title('Duck (imaginary-only)')
fftImagImg1 = 1j * fftDuck.imag
imagImg1 = np.abs(np.fft.ifft2(fftImagImg1))
plt.imshow(imagImg1)
plt.set_cmap('gray')
def pointSourceFFT(imgSize, ypos, xpos, amp=1.):
    """Plot a single point source and the phase of its 2D Fourier transform.

    Parameters
    ----------
    imgSize : int
        The image is (imgSize+1, imgSize+1) so that a central pixel exists.
    ypos, xpos : int
        Pixel position of the point source.
    amp : float
        Intensity of the point source.
    """
    img = np.zeros((imgSize+1, imgSize+1))  # odd-sized array so there is a central pixel
    img[ypos, xpos] = amp  # place the point source
    fftImg = np.fft.fft2(np.fft.fftshift(img))  # compute the Fourier transform of the image
    fig, axes = plt.subplots(figsize=(16,8))
    plt.subplot(1,2,1)
    plt.title('Image')
    plt.imshow(img, interpolation='nearest')
    plt.set_cmap('gray')
    plt.colorbar(shrink=0.5)
    plt.subplot(1,2,2)
    plt.title('Fourier Transform (phase)')
    plt.imshow(np.angle(fftImg))
    plt.set_cmap('hsv')
    plt.colorbar(shrink=0.5)
    # A point source has a flat amplitude spectrum, so only the phase is shown.
    # BUG FIX: converted the Python 2 print statement to the print() function
    # used everywhere else in this notebook.
    print('FFT Max:', np.max(np.abs(fftImg)), 'Min:', np.min(np.abs(fftImg)))
# Point sources at and around the centre of a 128-pixel image.
# BUG FIX: use integer division (//) so the pixel positions stay ints under
# Python 3 -- imgSize/2 yields a float, which raises IndexError when indexing.
imgSize = 128
pointSourceFFT(imgSize, (imgSize//2)+1, (imgSize//2)+1)
imgSize = 128
pointSourceFFT(imgSize, (imgSize//2)+1, (imgSize//2))
imgSize = 128
pointSourceFFT(imgSize, imgSize//2, (imgSize//2)+1)
imgSize = 128
# Offset the point source further and further north of the centre: the
# farther from the centre, the faster the phase varies across the transform.
pointSourceFFT(imgSize, (imgSize//2)+1 - 10, (imgSize//2)+1)  # 10 pixels north of the centre
pointSourceFFT(imgSize, (imgSize//2)+1 - 20, (imgSize//2)+1)  # 20 pixels north of the centre
pointSourceFFT(imgSize, (imgSize//2)+1 - 30, (imgSize//2)+1)  # 30 pixels north of the centre
pointSourceFFT(imgSize, (imgSize//2)+1 - 40, (imgSize//2)+1)  # 40 pixels north of the centre
def pointSourceFFTCircle(imgSize, ypos, xpos, amp=1., radius=10.):
    """Plot a point source (with a reference circle) and its transform's phase.

    Same as pointSourceFFT(), but overlays a circle of the given radius
    around the image centre so offsets at equal radii can be compared.

    Parameters
    ----------
    imgSize : int
        The image is (imgSize+1, imgSize+1) so that a central pixel exists.
    ypos, xpos : int
        Pixel position of the point source.
    amp : float
        Intensity of the point source.
    radius : float
        Radius (in pixels) of the reference circle drawn around the centre.
    """
    img = np.zeros((imgSize+1, imgSize+1))  # odd-sized array so there is a central pixel
    img[ypos, xpos] = amp  # place the point source
    fftImg = np.fft.fft2(np.fft.fftshift(img))  # compute the Fourier transform of the image
    fig = plt.figure(figsize=(16,8))
    ax = fig.add_subplot(1, 2, 1)
    plt.title('Image')
    # Reference circle centred on the image centre (a plot coordinate,
    # so a float centre is fine here).
    c = plt.Circle(((imgSize/2)+1, (imgSize/2)+1), radius, color='blue', linewidth=1, fill=False)
    ax.add_patch(c)
    plt.imshow(img, interpolation='nearest')
    plt.set_cmap('gray')
    plt.colorbar(shrink=0.5)
    ax = fig.add_subplot(1, 2, 2)
    plt.title('Fourier Transform (phase)')
    plt.imshow(np.angle(fftImg))
    plt.set_cmap('hsv')
    plt.colorbar(shrink=0.5)
    # BUG FIX: converted the Python 2 print statement to the print() function
    # used everywhere else in this notebook.
    print('FFT Max:', np.max(np.abs(fftImg)), 'Min:', np.min(np.abs(fftImg)))
# Point sources at different positions on (roughly) the same radius.
# BUG FIX: use integer division (//) so the pixel positions stay ints under
# Python 3 -- imgSize/2 yields a float, which raises IndexError when indexing.
imgSize = 128
pointSourceFFTCircle(imgSize, (imgSize//2)+1 - 10, (imgSize//2)+1 - 0, amp=1., radius=10.)
pointSourceFFTCircle(imgSize, (imgSize//2)+1 - 7, (imgSize//2)+1 - 7, amp=1.,radius=10.)
pointSourceFFTCircle(imgSize, (imgSize//2)+1 - 0, (imgSize//2)+1 - 10, amp=1.,radius=10.)
pointSourceFFTCircle(imgSize, (imgSize//2)+1 + 7, (imgSize//2)+1 - 7, amp=1.,radius=10.)
pointSourceFFTCircle(imgSize, (imgSize//2)+1 + 10, (imgSize//2)+1 - 0, amp=1.,radius=10.)
def multipleSourcesFFT(imgSize, pos, amp):
    """Plot several point sources and the phase of their 2D Fourier transform.

    Parameters
    ----------
    imgSize : int
        The image is (imgSize+1, imgSize+1) so that a central pixel exists.
    pos : sequence of (y, x) pairs
        Pixel positions of the point sources.
    amp : sequence of float
        Intensity of each source, matched pairwise with `pos`.
    """
    img = np.zeros((imgSize+1, imgSize+1))  # odd-sized array so there is a central pixel
    for p,a in zip(pos, amp):
        img[p[0], p[1]] = a  # place each source at its (y, x) position
    fftImg = np.fft.fft2(np.fft.fftshift(img))  # compute the Fourier transform of the image
    fig, axes = plt.subplots(figsize=(16,8))
    plt.subplot(1,2,1)
    plt.title('Image')
    plt.imshow(img, interpolation='nearest')
    plt.set_cmap('gray')
    plt.colorbar(shrink=0.5)
    plt.subplot(1,2,2)
    plt.title('Fourier Transform (phase)')
    plt.imshow(np.angle(fftImg))
    plt.set_cmap('hsv')
    plt.colorbar(shrink=0.5)
    # BUG FIX: converted the Python 2 print statement to the print() function
    # used everywhere else in this notebook.
    print('FFT Max:', np.max(np.abs(fftImg)), 'Min:', np.min(np.abs(fftImg)))
# Two sources at varying separations: the phase develops interference fringes.
imgSize = 128
multipleSourcesFFT(imgSize, [[64, 65], [90,65]], [1., 1.])
multipleSourcesFFT(imgSize, [[65, 64], [65,80]], [1., 1.])
multipleSourcesFFT(imgSize, [[64, 65], [90,80]], [1., 1.])
# Same two positions with different relative intensities.
imgSize = 128
multipleSourcesFFT(imgSize, [[64, 65], [90,80]], [1., 0.1])
multipleSourcesFFT(imgSize, [[64, 65], [90,80]], [1., 1.0])
multipleSourcesFFT(imgSize, [[64, 65], [90,80]], [1., 10.])
# A field of five sources with mixed intensities.
multipleSourcesFFT(imgSize, [[64,65], [90,80], [40,10], [60,50], [20,80]], [.2, 0.3, 0.5, 0.1, 0.7])
# Recompute the duck's (shifted) transform for the sampling experiments below.
fftDuck = np.fft.fftshift(np.fft.fft2(gDuck))
def reconstructImage(vis, nsamples):
    """Randomly sample the visibility (spatial-frequency) plane and invert.

    Randomly select a few values from the spatial frequency (visibility)
    domain and reconstruct the image with those samples.  To do a full
    reconstruction of the image, nsamples needs to be larger than the total
    number of pixels in the image, because np.random.randint() chooses with
    replacement, so positions will be double counted.
    """
    # BUG FIX: the description above was an unquoted block of text in the
    # original, which is a syntax error; it is now a proper docstring.
    subVis = np.zeros_like(vis)
    ypos = np.random.randint(0, vis.shape[0]-1, size=nsamples)
    xpos = np.random.randint(0, vis.shape[1]-1, size=nsamples)
    subVis[ypos, xpos] = vis[ypos, xpos]  # insert a subset of visibilities
    newImg = np.abs(np.fft.ifft2(np.fft.fftshift(subVis)))  # invert the sampled plane
    fig, axes = plt.subplots(figsize=(16,8))
    plt.subplot(1,2,1)
    plt.title('Sampled Visibilities')
    plt.imshow(np.abs(subVis).astype(bool), interpolation='nearest')
    plt.set_cmap('gray')
    plt.subplot(1,2,2)
    plt.title('Reconstructed Image')
    plt.imshow(newImg)
    plt.set_cmap('gray')
# Reconstruct the duck from progressively more visibility samples.
# NOTE(review): these counts are float literals (1e1, ...); newer NumPy
# requires an int for randint's `size` -- may need int(1e1) etc.
reconstructImage(fftDuck, 1e1)
reconstructImage(fftDuck, 1e3)
reconstructImage(fftDuck, 1e5)
reconstructImage(fftDuck, 1e6)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import section specific modules
Step2: 5.1 Spatial Frequencies<a id='imaging
Step3: For simplicity convert the RGB-color images to grayscale
Step4: Start by displaying the images in the spatial domain, i.e. the domain we usually look at images in. We use the term spatial domain to describe signals which are ordered by a distance which is directly related to the physical distance between the two signals, e.g. for the duck image below the eye is close to the head and far away from the foot as it is in the real world. This may seem like such a simple concept and you may wonder why the point is even being made. But, once we step into the spatial frequency domain our intution will be lost and we need to use the spatial domain relationship to try to regain our footing.
Step5: Figure
Step6: Figure
Step7: Figure
Step8: Figure
Step9: Figure
Step10: Figure
Step11: Figure
Step12: Figure
Step13: Figure
Step14: Left
Step15: Left
Step16: Left
Step17: Left
Step18: Left
Step19: Left
Step20: Left
Step22: Left
|
2,597
|
<ASSISTANT_TASK:>
Python Code:
# Display sample images with the Lightning visualization server.
from lightning import Lightning
from numpy import random, asarray, concatenate
from sklearn import datasets
# Connect to the public Lightning server, rendering inline in the notebook.
lgn = Lightning(ipython=True, host='http://public.lightning-viz.org')
imgs = datasets.load_sample_images().images
lgn.image(imgs[0])
# A single channel is rendered as grayscale automatically.
imgs = datasets.load_sample_images().images
lgn.image(imgs[0][:,:,0])
# width sets the displayed image size in pixels.
imgs = datasets.load_sample_images().images
lgn.image(imgs[0], width=400)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Connect to server
Step2: <hr> Basic image viewing
Step3: Single-channel images will automatically be presented as grayscale.
Step4: The usual sizing arguments can be used to set the image size in pixels.
|
2,598
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tqdm import tqdm
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
n_residual_blocks = 5
# The data, split between train and test sets (labels are discarded --
# PixelCNN is trained to model the images themselves).
(x, _), (y, _) = keras.datasets.mnist.load_data()
# Concatenate all of the images together
data = np.concatenate((x, y), axis=0)
# Binarize: pixel values below 33% of the max 256 value become 0,
# everything at or above becomes 1, so every pixel is either 0 or 1.
data = np.where(data < (0.33 * 256), 0, 1)
data = data.astype(np.float32)
# The first layer is the PixelCNN layer. This layer simply
# builds on the 2D convolutional layer, but includes masking.
class PixelConvLayer(layers.Layer):
    """Masked 2D convolution for autoregressive pixel modelling.

    mask_type "A" zeroes the kernel centre (first layer: a pixel must not
    see itself); mask_type "B" keeps the centre (subsequent layers).
    """

    def __init__(self, mask_type, **kwargs):
        super(PixelConvLayer, self).__init__()
        self.mask_type = mask_type
        # All Conv2D kwargs (filters, kernel_size, ...) are forwarded as-is.
        self.conv = layers.Conv2D(**kwargs)

    def build(self, input_shape):
        # Build the conv2d layer to initialize kernel variables
        self.conv.build(input_shape)
        # Use the initialized kernel to create the mask
        kernel_shape = self.conv.kernel.get_shape()
        self.mask = np.zeros(shape=kernel_shape)
        # All rows strictly above the kernel centre are visible ...
        self.mask[: kernel_shape[0] // 2, ...] = 1.0
        # ... plus the pixels to the left of the centre on the centre row.
        self.mask[kernel_shape[0] // 2, : kernel_shape[1] // 2, ...] = 1.0
        if self.mask_type == "B":
            # Type "B" also allows the centre pixel itself.
            self.mask[kernel_shape[0] // 2, kernel_shape[1] // 2, ...] = 1.0

    def call(self, inputs):
        # Re-apply the mask on every call so training updates cannot
        # reintroduce weights at masked positions.
        self.conv.kernel.assign(self.conv.kernel * self.mask)
        return self.conv(inputs)
# Next, we build our residual block layer.
# This is just a normal residual block, but based on the PixelConvLayer.
class ResidualBlock(keras.layers.Layer):
    """Residual block built around a type-"B" masked PixelConvLayer.

    Applies 1x1 conv -> masked 3x3 conv (half the filters) -> 1x1 conv,
    then adds the block input back onto the result (skip connection).
    """

    def __init__(self, filters, **kwargs):
        super(ResidualBlock, self).__init__(**kwargs)
        # Entry 1x1 convolution at full width.
        self.conv1 = keras.layers.Conv2D(
            filters=filters, kernel_size=1, activation="relu"
        )
        # Masked convolution operating on a bottleneck of filters // 2.
        self.pixel_conv = PixelConvLayer(
            mask_type="B",
            filters=filters // 2,
            kernel_size=3,
            activation="relu",
            padding="same",
        )
        # Exit 1x1 convolution restoring the full width.
        self.conv2 = keras.layers.Conv2D(
            filters=filters, kernel_size=1, activation="relu"
        )

    def call(self, inputs):
        # Run the bottleneck stack, then add the skip connection.
        transformed = self.conv2(self.pixel_conv(self.conv1(inputs)))
        return keras.layers.add([inputs, transformed])
# Assemble the PixelCNN: a type-"A" masked conv (so the first layer cannot
# see the current pixel), a stack of residual blocks, two 1x1 type-"B"
# masked convs, and a sigmoid output giving each pixel's P(value = 1).
inputs = keras.Input(shape=input_shape)
x = PixelConvLayer(
    mask_type="A", filters=128, kernel_size=7, activation="relu", padding="same"
)(inputs)
for _ in range(n_residual_blocks):
    x = ResidualBlock(filters=128)(x)
for _ in range(2):
    x = PixelConvLayer(
        mask_type="B",
        filters=128,
        kernel_size=1,
        strides=1,
        activation="relu",
        padding="valid",
    )(x)
out = keras.layers.Conv2D(
    filters=1, kernel_size=1, strides=1, activation="sigmoid", padding="valid"
)(x)
pixel_cnn = keras.Model(inputs, out)
adam = keras.optimizers.Adam(learning_rate=0.0005)
# Binarized pixels + sigmoid output => per-pixel binary cross-entropy;
# the input is also the target (autoregressive density estimation).
pixel_cnn.compile(optimizer=adam, loss="binary_crossentropy")
pixel_cnn.summary()
pixel_cnn.fit(
    x=data, y=data, batch_size=128, epochs=50, validation_split=0.1, verbose=2
)
from IPython.display import Image, display

# Create an empty array of pixels.
batch = 4
pixels = np.zeros(shape=(batch,) + (pixel_cnn.input_shape)[1:])
batch, rows, cols, channels = pixels.shape

# Iterate over the pixels because generation has to be done sequentially pixel by pixel.
for row in tqdm(range(rows)):
    for col in range(cols):
        for channel in range(channels):
            # Feed the whole array and retrieve the probability of the
            # current pixel being 1 for every image in the batch.
            probs = pixel_cnn.predict(pixels)[:, row, col, channel]
            # Bernoulli sample: ceil(p - u) is 1 with probability p for
            # uniform u in [0, 1), writing the drawn value into the frame.
            pixels[:, row, col, channel] = tf.math.ceil(
                probs - tf.random.uniform(probs.shape)
            )
def deprocess_image(x):
    """Convert a single-channel float image in [0, 1] to an RGB uint8 array."""
    # Replicate the grayscale plane into three identical RGB channels.
    rgb = np.stack((x, x, x), 2)
    # Undo the [0, 1] preprocessing scale, in place on the fresh copy.
    rgb *= 255.0
    # Clamp to the valid byte range and convert to unsigned 8-bit.
    return np.clip(rgb, 0, 255).astype("uint8")
# Iterate over the generated images, write each to a PNG, and display them.
for i, pic in enumerate(pixels):
    keras.preprocessing.image.save_img(
        # squeeze drops the trailing channel axis before RGB conversion
        "generated_image_{}.png".format(i), deprocess_image(np.squeeze(pic, -1))
    )

display(Image("generated_image_0.png"))
display(Image("generated_image_1.png"))
display(Image("generated_image_2.png"))
display(Image("generated_image_3.png"))
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting the data
Step2: Create two classes for the requisite Layers for the model
Step3: Build the model based on the original paper
Step4: Demonstration
|
2,599
|
<ASSISTANT_TASK:>
Python Code:
# Build an OpenMM engine for alanine dipeptide in vacuum with
# OpenPathSampling, run it, and round-trip the engine through storage.
from __future__ import print_function
import openpathsampling as paths
try:
    import openmm as omm
    import openmm.unit as u
except ImportError: # OpenMM < 7.6
    import simtk.openmm as omm
    import simtk.unit as u
import mdtraj as md
import openpathsampling.engines.openmm as eng
# NBVAL_IGNORE_OUTPUT
# Ignore until next OpenMMTools release (> 0.20.3), see openpathsampling/openpathsampling#1091
import openmmtools as omt
# Pre-built alanine dipeptide (vacuum) test system from OpenMMTools.
testsystem = omt.testsystems.AlanineDipeptideVacuum()
# NBVAL_SKIP
{ key: type(value) for key, value in testsystem.__dict__.items()}
# The three ingredients of an OPS OpenMM engine: a template snapshot,
# the OpenMM system, and an integrator.
template = eng.snapshot_from_testsystem(testsystem)
system = testsystem.system
integrator = omm.VerletIntegrator(
    0.002 * u.picoseconds
)
engine = eng.Engine(
    template.topology,
    system,
    integrator)
# Generate until the length-10 ensemble can no longer be extended.
traj = engine.generate(template, [paths.LengthEnsemble(10).can_append])
assert len(traj) == 10
# NBVAL_SKIP
print(traj[5].coordinates)
# NBVAL_IGNORE_OUTPUT
# covers mdtraj warning
psi = md.compute_psi(traj.to_mdtraj())
# NBVAL_IGNORE_OUTPUT
psi[1][3:8]
# Persist the engine (serialized via OpenMM XML), template, and trajectory.
st = paths.Storage('engine_store_test.nc', mode='w')
st.engines.save(engine);
st.tag['template'] = template
# NBVAL_SKIP
st.variables['engines_json'][0][0:256] + '...'
st.save(traj);
st.close()
# Reload the engine and template from storage and run another 10 steps.
st = paths.AnalysisStorage('engine_store_test.nc')
engine = st.engines[0]
template = st.tag['template']
traj = engine.generate(template, [paths.LengthEnsemble(10).can_append])
print(traj)
st.close()
# Reopen in append mode and save the new trajectory for future use.
st = paths.Storage('engine_store_test.nc', "a")
st.save(traj)
st.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Alanine in Vacuum and run it using OPS.
Step2: Let's have a look at the content
Step3: An OpenMM simulation in OPS needs 3 ingredients to function
Step4: 2. The system
Step5: 3. The integrator
Step6: build the engine
Step7: Let's run a simulation of 10 steps.
Step8: The engine is stored by using the XML serialization of OpenMM and put this into the storage.
Step9: try to load the stored engine
Step10: Run another 10 steps to test
Step11: And save the trajectory for future use (other tests)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.