text_prompt
stringlengths 168
30.3k
| code_prompt
stringlengths 67
124k
|
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 单机模拟
|
<ASSISTANT_TASK:>
Python Code:
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow-federated-nightly
!pip install --quiet --upgrade nest-asyncio
import nest_asyncio
nest_asyncio.apply()
import collections
import time
import tensorflow as tf
import tensorflow_federated as tff
source, _ = tff.simulation.datasets.emnist.load_data()
def map_fn(example):
    """Flatten an EMNIST example's pixel grid to a 784-vector, keeping its label."""
    flattened = tf.reshape(example['pixels'], [-1, 784])
    return collections.OrderedDict(x=flattened, y=example['label'])
def client_data(n):
    """Build the training pipeline for the n-th EMNIST client.

    Repeats for 10 epochs, shuffles with a 500-element buffer, batches by
    20, then flattens each example via map_fn. Uses the module-level
    `source` dataset loaded above.
    """
    ds = source.create_tf_dataset_for_client(source.client_ids[n])
    return ds.repeat(10).shuffle(500).batch(20).map(map_fn)
train_data = [client_data(n) for n in range(10)]
element_spec = train_data[0].element_spec
def model_fn():
    """Build a fresh softmax-regression Keras model wrapped for TFF.

    Passed to build_federated_averaging_process below, which calls it to
    construct a brand-new (uncompiled) model; zero-initialized weights and
    the module-level `element_spec` define the input contract.
    """
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=(784,)),
        tf.keras.layers.Dense(units=10, kernel_initializer='zeros'),
        tf.keras.layers.Softmax(),
    ])
    return tff.learning.from_keras_model(
        model,
        input_spec=element_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
trainer = tff.learning.build_federated_averaging_process(
model_fn, client_optimizer_fn=lambda: tf.keras.optimizers.SGD(0.02))
def evaluate(num_rounds=10):
    """Run `num_rounds` rounds of federated averaging, timing each round.

    Uses the module-level `trainer` and `train_data`; prints the metrics
    and wall-clock duration of every round.
    """
    state = trainer.initialize()
    for _ in range(num_rounds):
        t1 = time.time()
        # One federated round: broadcast, local training, aggregation.
        state, metrics = trainer.next(state, train_data)
        t2 = time.time()
        print('metrics {m}, round time {t:.2f} seconds'.format(
            m=metrics, t=t2 - t1))
evaluate()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: mnist_loader
Step3: Test mnist data
Step4: We are generating synthetic data in this project, so all the 55000 samples can be used for training
Step5: xavier_init
Step6: Test xavier_init
Step8: vae_init
Step9: vae_train
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
def mnist_loader():
    """Load MNIST data in a TensorFlow-readable format.

    The loading routine comes from:
    https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/tutorials/mnist/input_data.py

    Returns:
        (mnist, n_samples): the MNIST DataSets object (one-hot labels) and
        the number of training examples.
    """
    # The original docstring lost its triple quotes in transcription, leaving
    # bare text (a syntax error); it is restored above. The unused helper
    # imports (gzip, os, tempfile, numpy, six.moves) were dropped.
    from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
    mnist = read_data_sets('MNIST_data', one_hot=True)
    n_samples = mnist.train.num_examples
    return (mnist, n_samples)
(mnist, n_samples) = mnist_loader()
print('Number of available data: %d' % n_samples)
x_sample = mnist.test.next_batch(100)[0]
plt.figure(figsize=(8, 4))
for i in range(6):
plt.subplot(2, 3, i + 1)
plt.imshow(x_sample[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
plt.title("MNIST Data")
plt.colorbar()
plt.tight_layout()
def xavier_init(neuron_in, neuron_out, constant=1):
    """Xavier/Glorot uniform weight initialization.

    Draws a (neuron_in, neuron_out) tensor uniformly from
    [-c*sqrt(6/(fan_in+fan_out)), +c*sqrt(6/(fan_in+fan_out))].
    True division is guaranteed by the module's __future__ import.
    """
    low = -constant*np.sqrt(6/(neuron_in + neuron_out))
    high = constant*np.sqrt(6/(neuron_in + neuron_out))
    return tf.random_uniform((neuron_in, neuron_out), minval=low, maxval=high, dtype=tf.float32)
sess = tf.Session()
weights = []
for i in range(1000):
weights.append(sess.run(xavier_init(3,3)))
weights = np.array(weights).reshape((-1,1))
n, bins, patches = plt.hist(weights, bins=20)
plt.xlabel('weight value')
plt.ylabel('counts')
plt.title('Histogram of Weights Initialized by Xavier')
plt.show()
def vae_init(batch_size=100, learn_rate=0.001, x_in=784, encoder_1=500, encoder_2=500, decoder_1=500, decoder_2=500, z=20):
    """Build a variational autoencoder based on https://jmetzen.github.io/2015-11-27/vae.html

    In consideration of simplicity and future work on optimization, the class
    structure of the original was removed.

    Args:
        batch_size: number of samples drawn per training step.
        learn_rate: Adam learning rate.
        x_in: input dimensionality (784 for flattened 28x28 MNIST).
        encoder_1, encoder_2: encoder hidden-layer widths.
        decoder_1, decoder_2: decoder hidden-layer widths.
        z: dimensionality of the latent space.

    Returns:
        (sess, optimizer, cost, x, x_prime): a live TF session, the training
        op, the loss tensor, the input placeholder and the reconstruction.
    """
    # input placeholder
    x = tf.placeholder(tf.float32, [None, x_in])
    # initialize weights: two-layer encoder plus mean / log-variance heads
    encoder_h1 = tf.Variable(xavier_init(x_in, encoder_1))
    encoder_h2 = tf.Variable(xavier_init(encoder_1, encoder_2))
    encoder_mu = tf.Variable(xavier_init(encoder_2, z))
    encoder_sigma = tf.Variable(xavier_init(encoder_2, z))
    encoder_b1 = tf.Variable(tf.zeros([encoder_1], dtype=tf.float32))
    encoder_b2 = tf.Variable(tf.zeros([encoder_2], dtype=tf.float32))
    encoder_bias_mu = tf.Variable(tf.zeros([z], dtype=tf.float32))
    encoder_bias_sigma = tf.Variable(tf.zeros([z], dtype=tf.float32))
    # two-layer decoder
    decoder_h1 = tf.Variable(xavier_init(z, decoder_1))
    decoder_h2 = tf.Variable(xavier_init(decoder_1, decoder_2))
    decoder_mu = tf.Variable(xavier_init(decoder_2, x_in))
    decoder_sigma = tf.Variable(xavier_init(decoder_2, x_in))
    decoder_b1 = tf.Variable(tf.zeros([decoder_1], dtype=tf.float32))
    decoder_b2 = tf.Variable(tf.zeros([decoder_2], dtype=tf.float32))
    decoder_bias_mu = tf.Variable(tf.zeros([x_in], dtype=tf.float32))
    decoder_bias_sigma = tf.Variable(tf.zeros([x_in], dtype=tf.float32))
    # encoder forward pass: compute posterior mean and log-variance of z
    layer_1 = tf.nn.softplus(tf.add(tf.matmul(x, encoder_h1), encoder_b1))
    layer_2 = tf.nn.softplus(tf.add(tf.matmul(layer_1, encoder_h2), encoder_b2))
    z_mean = tf.add(tf.matmul(layer_2, encoder_mu), encoder_bias_mu)
    z_sigma = tf.add(tf.matmul(layer_2, encoder_sigma), encoder_bias_sigma)
    # reparameterization trick: z = mu + sqrt(exp(log_var)) * eps, eps ~ N(0, I)
    eps = tf.random_normal((batch_size, z), 0, 1, dtype=tf.float32)
    z_val = tf.add(z_mean, tf.multiply(tf.sqrt(tf.exp(z_sigma)), eps))
    # decoder forward pass: reconstruct x from the sampled latent code
    layer_1 = tf.nn.softplus(tf.add(tf.matmul(z_val, decoder_h1), decoder_b1))
    layer_2 = tf.nn.softplus(tf.add(tf.matmul(layer_1, decoder_h2), decoder_b2))
    x_prime = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, decoder_mu), decoder_bias_mu))
    # reconstruction loss: Bernoulli cross-entropy; 1e-10 guards log(0)
    recons_loss = -tf.reduce_sum(x * tf.log(1e-10 + x_prime) + (1-x) * tf.log(1e-10 + 1 - x_prime), 1)
    # KL divergence between q(z|x) = N(z_mean, exp(z_sigma)) and the N(0, I)
    # prior, in closed form (Kingma & Welling). BUG FIX: the exp() term must
    # take the log-variance z_sigma, not the sampled z_val -- the shapes
    # happen to match, so the original ran silently with the wrong loss.
    latent_loss = -0.5 * tf.reduce_sum(1 + z_sigma - tf.square(z_mean) - tf.exp(z_sigma), 1)
    # total loss: average over the batch
    cost = tf.reduce_mean(recons_loss + latent_loss)
    # use ADAM to optimize
    optimizer = tf.train.AdamOptimizer(learning_rate=learn_rate).minimize(cost)
    # initialize all variables
    init = tf.global_variables_initializer()
    # define and return the session
    sess = tf.InteractiveSession()
    sess.run(init)
    return (sess, optimizer, cost, x, x_prime)
def vae_train(sess, optimizer, cost, x, batch_size=100, learn_rate=0.001, x_in=784, encoder_1=500, encoder_2=500, decoder_1=500,
              decoder_2=500, z=20, train_epoch=10, verb=1, verb_step=5):
    """Train the VAE built by vae_init with mini-batch gradient steps.

    Relies on the module-level `mnist` and `n_samples` from mnist_loader().
    NOTE(review): only sess/optimizer/cost/x, batch_size, train_epoch, verb
    and verb_step are used; the network-shape parameters mirror vae_init's
    signature but are ignored here -- confirm and prune.
    """
    for epoch in range(train_epoch):
        avg_cost = 0
        total_batch = int(n_samples / batch_size)
        for i in range(total_batch):
            batch_x, _ = mnist.train.next_batch(batch_size)
            # One optimization step; also fetch this batch's cost.
            _, c = sess.run((optimizer, cost), feed_dict={x: batch_x})
            # Accumulate a per-sample weighted average of the epoch cost.
            avg_cost += c / n_samples * batch_size
        if verb:
            if epoch % verb_step == 0:
                # print('Epoch:%04d\tCost=%.2f' % (epoch+1, avg_cost))
                print('Epoch:%04d' % (epoch+1), 'cost=', '{:.9f}'.format(avg_cost))
# Build and train the VAE for 75 epochs, then visually compare held-out
# test digits with their reconstructions.
(sess, optimizer, cost, x, x_prime) = vae_init()
vae_train(sess, optimizer, cost, x, train_epoch=75)
x_sample = mnist.test.next_batch(100)[0]
x_reconstruct = sess.run(x_prime, feed_dict={x: x_sample})
plt.figure(figsize=(8, 12))
for i in range(5):
    # Left column: original test digit; right column: VAE reconstruction.
    plt.subplot(5, 2, 2*i + 1)
    plt.imshow(x_sample[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
    plt.title("Test input")
    plt.colorbar()
    plt.subplot(5, 2, 2*i + 2)
    plt.imshow(x_reconstruct[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
    plt.title("Reconstruction")
    plt.colorbar()
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 协程可以处于 4 个状态中的一个。当前状态可以使用 inspect.getgeneratorstate(...) 函数确定,该函数会返回下面字符串中的一个
Step2: 注意错误描述,描述的很清楚
Step3: 注意这个是产出值的时间
Step4: 使用协程计算移动平均值
Step5: 这个无限循环表明,只要调用方不断把值发给协程,它就会一直接收值,然后生成结果。仅当调用方在协程上调用 .close() 方法,或者没有协程的引用而被垃圾回收程序回收时,这个协程才会终止。
Step6: 我们一定想知道如何停止这个协程,但是在此之前我们先讨论如何启动协程。在使用协程之前必须要预激,这一步容易忘记,为了避免忘记,可以在协程上使用一个特殊的装饰器
Step7: 使用 yield from 语法调用协程也会自动预激(一会讲),因此会和上面的 @coroutine 装饰器不兼容。Python 3.4 标准库中的 asyncio.coroutine 装饰器(第 18 章)不会预激协程,因此能兼容 yield from 方法
Step8: 如果把 DemoException 异常传入 demo_exc_handling 协程,它会处理,然后继续运行,如下:
Step9: 但是,传入协程的异常没有处理,协程会停止,即状态变成 GEN_CLOSED
Step10: 如果不管协程如何结束都想做那些清理工作,要把协程定义体中相关代码放入 try/finally 块中,如下:
Step11: Python 3.3 引入 yield from 结构的主要原因之一与把异常传入嵌套的协程有关。另一个原因是让协程更方便的返回值
Step12: 下面展示捕获 StopIteration 异常:
Step13: 获取协程返回值虽然要绕个圈子,但是这是 PEP 380 定义的方式,如果我们意识到这一点就说得通了,yield from 结构会在内部自动捕获 StopIteration 异常。这种处理方式与 for 循环处理 StopIteration 异常的方式一样,循环机制使用户用易于理解的方式处理异常,对于 yield from 来说,解释器不仅会捕获 StopIteration 异常,而且还会把 value 属性的值变成 yield from 表达式的值。可惜,我们无法在控制台中使用交互的方式测试这种行为,因为在函数外部使用 yield from(以及 yield)会导致语法出错
Step14: 可以改写成:
Step15: 在 14 章首次提到 yield from 时举了一个例子,演示这个结构用法:
Step16: yield from x 表达式对 x 做的第一件事是就是,调用 iter(x), 从中获取迭代器。因此 x 可以是任何可迭代对象。
|
<ASSISTANT_TASK:>
Python Code:
def simple_coroutine():
    """Minimal coroutine: announces start, receives one value, then ends.

    The bare `yield` produces None because this coroutine only consumes
    data sent by the caller.
    """
    print('-> coroutine started')
    # The value passed to .send() becomes the result of this yield expression.
    x = yield
    # Fix: the original message misspelled 'coroutine' as 'croutine'.
    print('-> coroutine received:', x)
my_coro = simple_coroutine()
my_coro
# 先调用 next(...) 函数,因为生成器还没启动,没在 yield 语句暂停,所以无法发送数据
next(my_coro)
# 协程定义体中的 yield 表达式会计算出 42,现在协程会恢复,
# 一直运行到下一个 yield 表达式或者终止
my_coro.send(42) # 控制权流动到协程定义体末尾,生成器抛出 StopIteration 异常
my_coro = simple_coroutine()
my_coro.send(9577)
def simple_coro2(a):
    """Two-yield demo coroutine: yields a, then a plus the first sent value."""
    print('-> Started a=', a)
    first = yield a           # the caller's first send() lands here
    print('-> Received: b=', first)
    second = yield a + first  # the caller's second send() lands here
    print('-> Received: c=', second)
my_coro2 = simple_coro2(14)
from inspect import getgeneratorstate
getgeneratorstate(my_coro2) # 协程未启动
next(my_coro2) # 产出 a 的值,暂停,等待为 b 赋值
getgeneratorstate(my_coro2) # 协程在 yield 表达式暂停
# 把数字 28 给协程,计算 yield 表达式,得到 28,然后把 28 绑定给 b
# 打印 b = 28 消息,产出 a + b 的值 (42),然后协程暂停,等待为 c 赋值
my_coro2.send(28)
my_coro2.send(99)
from IPython.display import Image
Image(filename='yield.png')
def averager():
    """Coroutine yielding the running mean of every value sent to it."""
    running_sum, n = 0.0, 0
    current_avg = None
    while True:
        value = yield current_avg  # receive next term, emit current average
        running_sum += value
        n += 1
        current_avg = running_sum / n
coro_avg = averager()
next(coro_avg)
coro_avg.send(10)
coro_avg.send(30)
coro_avg.send(5)
from functools import wraps
def coroutine(func):
    """Decorator: return generators from `func` already advanced to the
    first yield, so callers can .send() immediately without priming."""
    @wraps(func)
    def primer(*args, **kwargs):
        generator = func(*args, **kwargs)  # build the generator object
        next(generator)                    # prime it: run to the first yield
        return generator
    return primer
@coroutine
def averager():
    """Running-average coroutine, pre-primed by the @coroutine decorator."""
    running_sum = 0.0
    n = 0
    mean = None
    while True:
        # A non-numeric value raises TypeError on the += below, which
        # terminates the coroutine (demonstrated by the cells that follow).
        term = yield mean
        running_sum += term
        n += 1
        mean = running_sum / n
coro_avg = averager() # 使用 coroutine 装饰的协程,可以立即开始发送值
coro_avg.send(40)
coro_avg.send(50)
coro_avg.send('spam') # 发送的不是数字,导致协程内部有异常抛出
coro_avg.send(60) # 由于协程内部没有异常处理,协程会终止,试图重新激活协程,会抛出 StopIteration 异常
class DemoException(Exception):
    """Exception type defined for this demonstration."""


def demo_exc_handling():
    """Coroutine that absorbs DemoException and keeps running.

    Any other exception thrown into it propagates and kills the coroutine.
    """
    print('-> coroutine started')
    while True:
        try:
            received = yield
        except DemoException:
            print('*** DemoException handled. Continuing...')
        else:
            print('-> coroutine received: {!r}'.format(received))
    # Only an unhandled exception can break the loop, and that aborts the
    # coroutine entirely -- so this line is unreachable by design.
    raise RuntimeError('This line should never run')
exc_coro = demo_exc_handling()
next(exc_coro)
exc_coro.send(11)
exc_coro.send(22)
exc_coro.close()
from inspect import getgeneratorstate
getgeneratorstate(exc_coro)
exc_coro = demo_exc_handling()
next(exc_coro)
exc_coro.send(11)
exc_coro.throw(DemoException)
getgeneratorstate(exc_coro)
exc_coro = demo_exc_handling()
next(exc_coro)
exc_coro.send(11)
exc_coro.throw(ZeroDivisionError)
getgeneratorstate(exc_coro)
class DemoException(Exception):
    """Exception type defined for this demonstration."""


def demo_finally():
    """Like demo_exc_handling, but guarantees cleanup via try/finally."""
    print('-> coroutine started')
    try:
        while True:
            try:
                received = yield
            except DemoException:
                print('*** DemoException handled. Continuing...')
            else:
                print('-> coroutine received: {!r}'.format(received))
    finally:
        # Runs no matter how the coroutine ends: close(), an unhandled
        # exception, or garbage collection.
        print('-> coroutine ending')
from collections import namedtuple

Result = namedtuple('Result', 'count average')


def averager():
    """Accumulate sent values; when None is sent, return Result(count, average).

    The coroutine must terminate normally for the return value to reach the
    caller (delivered inside StopIteration.value, or captured by yield from).
    """
    running_total = 0.0
    n = 0
    mean = None
    while True:
        value = yield
        if value is None:  # sentinel: stop accumulating and return
            break
        running_total += value
        n += 1
        mean = running_total / n
    return Result(n, mean)
coro_avg = averager()
next(coro_avg)
coro_avg.send(10) # 这一版不返回值
coro_avg.send(30)
coro_avg.send(6.5)
# 发 None 会终止循环,导致协程结束,一如既往,生成器会抛出 StopIteration 异常,异常对象的 value 属性保存着返回的值
coro_avg.send(None)
coro_avg = averager()
next(coro_avg)
coro_avg.send(10)
coro_avg.send(30)
coro_avg.send(6.5)
try:
coro_avg.send(None)
except StopIteration as exc:
result = exc.value
result
def gen():
    """Yield 'A', 'B', 1, 2 -- written out explicitly, no loops."""
    yield 'A'
    yield 'B'
    yield 1
    yield 2
list(gen())
def gen():
    """Yield 'A', 'B', 1, 2 by delegating to each sub-iterable in turn."""
    for sub in ('AB', range(1, 3)):
        yield from sub
def chain(*iterables):
    """Lazily yield every item of each iterable, in order (itertools.chain clone)."""
    for current in iterables:
        # yield from delegates the entire sub-iteration in one statement
        yield from current
s = 'ABC'
t = tuple(range(3))
list(chain(s, t))
from collections import namedtuple
from time import sleep

Result = namedtuple('Result', 'count average')


# Subgenerator
def averager():
    """Average values sent by the client; a None sentinel ends one group."""
    running_total = 0.0
    n = 0
    mean = None
    while True:
        # print('11111')
        value = yield  # each value sent by main() arrives here via yield from
        if value is None:
            # Crucial stop condition: without it the yield from in grouper
            # would block forever on this coroutine.
            break
        running_total += value
        n += 1
        mean = running_total / n
    # Becomes the value of the yield from expression inside grouper.
    return Result(n, mean)


# Delegating generator
def grouper(results, key):
    """Pipe sent values into fresh averager instances, storing each Result.

    grouper suspends at the yield from while an averager runs; when that
    averager returns, its Result is bound to results[key] and the loop
    spins up a new averager for the next group.
    """
    while True:
        results[key] = yield from averager()


# Client code (the caller)
def main(data):
    """Feed every measurement group through a grouper/averager pair."""
    results = {}
    for key, values in data.items():
        group = grouper(results, key)  # one delegating coroutine per group
        next(group)  # prime it
        # sleep(60)
        for value in values:
            # Values pass transparently through grouper into averager.
            group.send(value)
        # Terminate the current averager so results[key] gets assigned
        # before we move on to the next group.
        group.send(None)
    #print(results)
    report(results)


# Report output
def report(results):
    """Print count/average per group, sorted by 'group;unit' key."""
    for key, result in sorted(results.items()):
        group, unit = key.split(';')
        print('{:2} {:5} averaging {:.2f}{}'.format(
            result.count, group, result.average, unit))


data = {
    'girls;kg':
    [40.9, 38.5, 44.3, 42.2, 45.2, 41.7, 44.5, 38.0, 40.6, 44.5],
    'girls;m':
    [1.6, 1.51, 1.4, 1.3, 1.41, 1.39, 1.33, 1.46, 1.45, 1.43],
    'boys;kg':
    [39.0, 40.8, 43.2, 40.8, 43.1, 38.6, 41.4, 40.6, 36.3],
    'boys;m':
    [1.38, 1.5, 1.32, 1.25, 1.37, 1.48, 1.25, 1.49, 1.46],
}

main(data)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: # Pandas 간단 소개
Step2: Pandas의 기본 데이터 구조는 두 가지 클래스로 구현됩니다.
Step3: DataFrame 객체는 string 열 이름과 매핑되는 'dict'를 각각의 Series에 전달하여 만들 수 있습니다. Series의 길이가 일치하지 않는 경우, 누락된 값은 특수 NA/NaN 값으로 채워집니다. 예를 들면 다음과 같습니다.
Step4: 하지만 대부분의 경우 전체 파일을 DataFrame으로 로드합니다. 다음 예는 캘리포니아 부동산 데이터가 있는 파일을 로드합니다. 다음 셀을 실행하여 데이터에 로드하고 기능 정의를 만들어 보세요.
Step5: 위의 예에서는 DataFrame.describe를 사용하여 DataFrame에 관한 흥미로운 통계를 보여줍니다. 또 다른 유용한 함수는 DataFrame.head로, DataFrame 레코드 중 처음 몇 개만 표시합니다.
Step6: Pandas의 또 다른 강력한 기능은 그래핑입니다. 예를 들어 DataFrame.hist를 사용하면 한 열에서 값의 분포를 빠르게 검토할 수 있습니다.
Step7: ## 데이터 액세스
Step8: 또한 Pandas는 고급 색인 생성 및 선택 기능을 위한 풍부한 API를 제공합니다. 이 내용은 너무 광범위하므로 여기에서 다루지 않습니다.
Step9: NumPy는 유명한 계산과학 툴킷입니다. Pandas Series는 대부분의 NumPy 함수에 인수로 사용할 수 있습니다.
Step10: 더 복잡한 단일 열 변환에는 Series.apply를 사용할 수 있습니다. Python map 함수처럼,
Step11: DataFrames 수정 역시 간단합니다. 예를 들어 다음 코드는 기존 DataFrame에 두 개의 Series를 추가합니다.
Step12: ## 실습 #1
Step13: ### 해결 방법
Step14: ## 색인
Step15: DataFrame.reindex를 호출하여 수동으로 행의 순서를 재정렬합니다. 예를 들어 다음은 도시 이름을 기준으로 분류하는 것과 효과가 같습니다.
Step16: 색인 재생성은 DataFrame을 섞기(임의 설정하기) 위한 좋은 방법입니다. 아래의 예에서는 배열처럼 된 색인을 NumPy의 random.permutation 함수에 전달하여 값을 섞습니다. 이렇게 섞인 배열로 reindex를 호출하면 DataFrame 행도 같은 방식으로 섞입니다.
Step17: 자세한 정보는 색인 문서를 참조하세요.
Step18: ### 해결 방법
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import pandas as pd
pd.__version__
# A Series is one column of labeled data.
pd.Series(['San Francisco', 'San Jose', 'Sacramento'])
city_names = pd.Series(['San Francisco', 'San Jose', 'Sacramento'])
population = pd.Series([852469, 1015785, 485199])
# A DataFrame built from a dict mapping column name -> Series.
pd.DataFrame({ 'City name': city_names, 'Population': population })
# Load the California housing CSV straight into a DataFrame.
california_housing_dataframe = pd.read_csv("https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv", sep=",")
california_housing_dataframe.describe()
california_housing_dataframe.head()
california_housing_dataframe.hist('housing_median_age')
cities = pd.DataFrame({ 'City name': city_names, 'Population': population })
# Selecting a column yields a Series; indexing into it yields a scalar.
print(type(cities['City name']))
cities['City name']
print(type(cities['City name'][1]))
cities['City name'][1]
# Row slicing yields a DataFrame.
print(type(cities[0:2]))
cities[0:2]
# Arithmetic and NumPy ufuncs apply elementwise over a Series.
population / 1000.
import numpy as np
np.log(population)
# Series.apply maps an arbitrary function over every element.
population.apply(lambda val: val > 1000000)
# New columns are added by assignment.
cities['Area square miles'] = pd.Series([46.87, 176.53, 97.92])
cities['Population density'] = cities['Population'] / cities['Area square miles']
cities
# Your code here
# Exercise solution: wider than 50 sq mi AND named after a saint ('San ...').
cities['Is wide and has saint name'] = (cities['Area square miles'] > 50) & cities['City name'].apply(lambda name: name.startswith('San'))
cities
# Every Series/DataFrame carries an index identifying its rows.
city_names.index
cities.index
# reindex reorders rows by index value.
cities.reindex([2, 0, 1])
# Shuffling rows: reindex with a random permutation of the index.
cities.reindex(np.random.permutation(cities.index))
# Your code here
# Exercise solution: index values absent from the DataFrame (4, 5 here)
# produce new rows filled with NaN rather than raising.
cities.reindex([0, 4, 5, 2])
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TF Lattice の缶詰 Estimator
Step2: 必要なパッケージをインポートします。
Step3: UCI Statlog(心臓)データセットをダウンロードします。
Step4: このガイドのトレーニングに使用されるデフォルト値を設定します。
Step5: 特徴量カラム
Step6: TFL 缶詰 Estimator は、どの種類の較正レイヤーを使用するかを決定するために特徴量カラムの種類を使用します。数値特徴量カラムには tfl.layers.PWLCalibration レイヤーを使用し、分類特徴量カラムには tfl.layers.CategoricalCalibration レイヤーを使用します。
Step7: 特徴量の構成
Step8: 較正済みの線形モデル
Step9: 較正済みの格子モデル
Step10: 較正済みの格子アンサンブル
Step11: RTL レイヤーのランダムな格子アンサンブル
Step12: Crystals 格子アンサンブル
Step13: その他とで、モデル構成で lattice='crystals' を設定すると、Crystal モデルを作成することができます。
Step14: tfl.visualization モジュールを使用して、より詳細に特徴量キャリブレータを描画することができます。
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@test {"skip": true}
!pip install tensorflow-lattice
import tensorflow as tf
import copy
import logging
import numpy as np
import pandas as pd
import sys
import tensorflow_lattice as tfl
from tensorflow import feature_column as fc
logging.disable(sys.maxsize)
csv_file = tf.keras.utils.get_file(
'heart.csv', 'http://storage.googleapis.com/download.tensorflow.org/data/heart.csv')
df = pd.read_csv(csv_file)
target = df.pop('target')
train_size = int(len(df) * 0.8)
train_x = df[:train_size]
train_y = target[:train_size]
test_x = df[train_size:]
test_y = target[train_size:]
df.head()
LEARNING_RATE = 0.01
BATCH_SIZE = 128
NUM_EPOCHS = 500
PREFITTING_NUM_EPOCHS = 10
# Feature columns.
# - age
# - sex
# - cp chest pain type (4 values)
# - trestbps resting blood pressure
# - chol serum cholestoral in mg/dl
# - fbs fasting blood sugar > 120 mg/dl
# - restecg resting electrocardiographic results (values 0,1,2)
# - thalach maximum heart rate achieved
# - exang exercise induced angina
# - oldpeak ST depression induced by exercise relative to rest
# - slope the slope of the peak exercise ST segment
# - ca number of major vessels (0-3) colored by flourosopy
# - thal 3 = normal; 6 = fixed defect; 7 = reversable defect
feature_columns = [
fc.numeric_column('age', default_value=-1),
fc.categorical_column_with_vocabulary_list('sex', [0, 1]),
fc.numeric_column('cp'),
fc.numeric_column('trestbps', default_value=-1),
fc.numeric_column('chol'),
fc.categorical_column_with_vocabulary_list('fbs', [0, 1]),
fc.categorical_column_with_vocabulary_list('restecg', [0, 1, 2]),
fc.numeric_column('thalach'),
fc.categorical_column_with_vocabulary_list('exang', [0, 1]),
fc.numeric_column('oldpeak'),
fc.categorical_column_with_vocabulary_list('slope', [0, 1, 2]),
fc.numeric_column('ca'),
fc.categorical_column_with_vocabulary_list(
'thal', ['normal', 'fixed', 'reversible']),
]
# Training pipeline: iterates the training frame for NUM_EPOCHS batches.
train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
    x=train_x,
    y=train_y,
    shuffle=False,
    batch_size=BATCH_SIZE,
    num_epochs=NUM_EPOCHS,
    num_threads=1)
# feature_analysis_input_fn is used to collect statistics about the input.
feature_analysis_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
    x=train_x,
    y=train_y,
    shuffle=False,
    batch_size=BATCH_SIZE,
    # Note that we only need one pass over the data.
    num_epochs=1,
    num_threads=1)
# Evaluation pipeline: a single pass over the held-out test split.
test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
    x=test_x,
    y=test_y,
    shuffle=False,
    batch_size=BATCH_SIZE,
    num_epochs=1,
    num_threads=1)
# Serving input fn is used to create saved models.
serving_input_fn = (
    tf.estimator.export.build_parsing_serving_input_receiver_fn(
        feature_spec=fc.make_parse_example_spec(feature_columns)))
# Feature configs are used to specify how each feature is calibrated and used.
feature_configs = [
tfl.configs.FeatureConfig(
name='age',
lattice_size=3,
# By default, input keypoints of pwl are quantiles of the feature.
pwl_calibration_num_keypoints=5,
monotonicity='increasing',
pwl_calibration_clip_max=100,
# Per feature regularization.
regularizer_configs=[
tfl.configs.RegularizerConfig(name='calib_wrinkle', l2=0.1),
],
),
tfl.configs.FeatureConfig(
name='cp',
pwl_calibration_num_keypoints=4,
# Keypoints can be uniformly spaced.
pwl_calibration_input_keypoints='uniform',
monotonicity='increasing',
),
tfl.configs.FeatureConfig(
name='chol',
# Explicit input keypoint initialization.
pwl_calibration_input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0],
monotonicity='increasing',
# Calibration can be forced to span the full output range by clamping.
pwl_calibration_clamp_min=True,
pwl_calibration_clamp_max=True,
# Per feature regularization.
regularizer_configs=[
tfl.configs.RegularizerConfig(name='calib_hessian', l2=1e-4),
],
),
tfl.configs.FeatureConfig(
name='fbs',
# Partial monotonicity: output(0) <= output(1)
monotonicity=[(0, 1)],
),
tfl.configs.FeatureConfig(
name='trestbps',
pwl_calibration_num_keypoints=5,
monotonicity='decreasing',
),
tfl.configs.FeatureConfig(
name='thalach',
pwl_calibration_num_keypoints=5,
monotonicity='decreasing',
),
tfl.configs.FeatureConfig(
name='restecg',
# Partial monotonicity: output(0) <= output(1), output(0) <= output(2)
monotonicity=[(0, 1), (0, 2)],
),
tfl.configs.FeatureConfig(
name='exang',
# Partial monotonicity: output(0) <= output(1)
monotonicity=[(0, 1)],
),
tfl.configs.FeatureConfig(
name='oldpeak',
pwl_calibration_num_keypoints=5,
monotonicity='increasing',
),
tfl.configs.FeatureConfig(
name='slope',
# Partial monotonicity: output(0) <= output(1), output(1) <= output(2)
monotonicity=[(0, 1), (1, 2)],
),
tfl.configs.FeatureConfig(
name='ca',
pwl_calibration_num_keypoints=4,
monotonicity='increasing',
),
tfl.configs.FeatureConfig(
name='thal',
# Partial monotonicity:
# output(normal) <= output(fixed)
# output(normal) <= output(reversible)
monotonicity=[('normal', 'fixed'), ('normal', 'reversible')],
),
]
# Model config defines the model structure for the estimator.
model_config = tfl.configs.CalibratedLinearConfig(
feature_configs=feature_configs,
use_bias=True,
output_calibration=True,
regularizer_configs=[
# Regularizer for the output calibrator.
tfl.configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4),
])
# A CannedClassifier is constructed from the given model config.
estimator = tfl.estimators.CannedClassifier(
feature_columns=feature_columns[:5],
model_config=model_config,
feature_analysis_input_fn=feature_analysis_input_fn,
optimizer=tf.keras.optimizers.Adam(LEARNING_RATE),
config=tf.estimator.RunConfig(tf_random_seed=42))
estimator.train(input_fn=train_input_fn)
results = estimator.evaluate(input_fn=test_input_fn)
print('Calibrated linear test AUC: {}'.format(results['auc']))
saved_model_path = estimator.export_saved_model(estimator.model_dir,
serving_input_fn)
model_graph = tfl.estimators.get_model_graph(saved_model_path)
tfl.visualization.draw_model_graph(model_graph)
# This is calibrated lattice model: Inputs are calibrated, then combined
# non-linearly using a lattice layer.
model_config = tfl.configs.CalibratedLatticeConfig(
feature_configs=feature_configs,
regularizer_configs=[
# Torsion regularizer applied to the lattice to make it more linear.
tfl.configs.RegularizerConfig(name='torsion', l2=1e-4),
# Globally defined calibration regularizer is applied to all features.
tfl.configs.RegularizerConfig(name='calib_hessian', l2=1e-4),
])
# A CannedClassifier is constructed from the given model config.
estimator = tfl.estimators.CannedClassifier(
feature_columns=feature_columns[:5],
model_config=model_config,
feature_analysis_input_fn=feature_analysis_input_fn,
optimizer=tf.keras.optimizers.Adam(LEARNING_RATE),
config=tf.estimator.RunConfig(tf_random_seed=42))
estimator.train(input_fn=train_input_fn)
results = estimator.evaluate(input_fn=test_input_fn)
print('Calibrated lattice test AUC: {}'.format(results['auc']))
saved_model_path = estimator.export_saved_model(estimator.model_dir,
serving_input_fn)
model_graph = tfl.estimators.get_model_graph(saved_model_path)
tfl.visualization.draw_model_graph(model_graph)
# This is random lattice ensemble model with separate calibration:
# model output is the average output of separately calibrated lattices.
model_config = tfl.configs.CalibratedLatticeEnsembleConfig(
feature_configs=feature_configs,
num_lattices=5,
lattice_rank=3)
# A CannedClassifier is constructed from the given model config.
estimator = tfl.estimators.CannedClassifier(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=feature_analysis_input_fn,
optimizer=tf.keras.optimizers.Adam(LEARNING_RATE),
config=tf.estimator.RunConfig(tf_random_seed=42))
estimator.train(input_fn=train_input_fn)
results = estimator.evaluate(input_fn=test_input_fn)
print('Random ensemble test AUC: {}'.format(results['auc']))
saved_model_path = estimator.export_saved_model(estimator.model_dir,
serving_input_fn)
model_graph = tfl.estimators.get_model_graph(saved_model_path)
tfl.visualization.draw_model_graph(model_graph, calibrator_dpi=15)
# Make sure our feature configs have the same lattice size, no per-feature
# regularization, and only monotonicity constraints.
rtl_layer_feature_configs = copy.deepcopy(feature_configs)
for feature_config in rtl_layer_feature_configs:
feature_config.lattice_size = 2
feature_config.unimodality = 'none'
feature_config.reflects_trust_in = None
feature_config.dominates = None
feature_config.regularizer_configs = None
# This is RTL layer ensemble model with separate calibration:
# model output is the average output of separately calibrated lattices.
model_config = tfl.configs.CalibratedLatticeEnsembleConfig(
lattices='rtl_layer',
feature_configs=rtl_layer_feature_configs,
num_lattices=5,
lattice_rank=3)
# A CannedClassifier is constructed from the given model config.
estimator = tfl.estimators.CannedClassifier(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=feature_analysis_input_fn,
optimizer=tf.keras.optimizers.Adam(LEARNING_RATE),
config=tf.estimator.RunConfig(tf_random_seed=42))
estimator.train(input_fn=train_input_fn)
results = estimator.evaluate(input_fn=test_input_fn)
print('Random ensemble test AUC: {}'.format(results['auc']))
saved_model_path = estimator.export_saved_model(estimator.model_dir,
serving_input_fn)
model_graph = tfl.estimators.get_model_graph(saved_model_path)
tfl.visualization.draw_model_graph(model_graph, calibrator_dpi=15)
prefitting_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
x=train_x,
y=train_y,
shuffle=False,
batch_size=BATCH_SIZE,
num_epochs=PREFITTING_NUM_EPOCHS,
num_threads=1)
# This is Crystals ensemble model with separate calibration: model output is
# the average output of separately calibrated lattices.
model_config = tfl.configs.CalibratedLatticeEnsembleConfig(
feature_configs=feature_configs,
lattices='crystals',
num_lattices=5,
lattice_rank=3)
# A CannedClassifier is constructed from the given model config.
estimator = tfl.estimators.CannedClassifier(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=feature_analysis_input_fn,
# prefitting_input_fn is required to train the prefitting model.
prefitting_input_fn=prefitting_input_fn,
optimizer=tf.keras.optimizers.Adam(LEARNING_RATE),
prefitting_optimizer=tf.keras.optimizers.Adam(LEARNING_RATE),
config=tf.estimator.RunConfig(tf_random_seed=42))
estimator.train(input_fn=train_input_fn)
results = estimator.evaluate(input_fn=test_input_fn)
print('Crystals ensemble test AUC: {}'.format(results['auc']))
saved_model_path = estimator.export_saved_model(estimator.model_dir,
serving_input_fn)
model_graph = tfl.estimators.get_model_graph(saved_model_path)
tfl.visualization.draw_model_graph(model_graph, calibrator_dpi=15)
_ = tfl.visualization.plot_feature_calibrator(model_graph, "age")
_ = tfl.visualization.plot_feature_calibrator(model_graph, "restecg")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2-Point Statistics for Checkerboard Microstructure
Step2: Now let's take a look at how the microstructure looks.
Step3: Compute Periodic 2-Point Statistics
Step4: We have now computed the autocorrelations.
Step5: Notice that for this checkerboard microstructure, the autocorrelation for these 2 local states in the exact same. We have just computed the periodic autocorrelations for a perfectly periodic microstructure with equal volume fractions. In general this is not the case and the autocorrelations will be different as we will see later in this example.
Step6: We can compute the cross-correlation of the microstructure function using the crosscorrelate function from pymks.stats
Step7: Let's take a look at the cross correlation using draw_crosscorrelations from pymks.tools.
Step8: Notice that the crosscorrelation is the exact opposite of the 2 autocorrelations. The (0, 0) vector has a value of 0. This statistic reflects the probablity of 2 phases having the same location. In our microstructure, this probability is zero as we have not allowed the two phases (colored black and white) to co-exist in the same spatial voxel.
Step9: Compute Non-Periodic 2-Point Statistics
Step10: All or some of the correlations can be viewed using the draw_correlations function from pymks.tools. In this example we will look at all of them.
Step11: Notice that the maximum values for the autocorrelations are higher than 0.5. We can still show that the centers or the (0, 0) vectors are still equal to the volume fractions.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
from pymks.datasets import make_checkerboard_microstructure
X = make_checkerboard_microstructure(square_size=21, n_squares=8)
from pymks.tools import draw_microstructures
draw_microstructures(X)
print X.shape
from pymks.stats import autocorrelate
from pymks import PrimitiveBasis
prim_basis = PrimitiveBasis(n_states=2)
X_ = prim_basis.discretize(X)
X_auto = autocorrelate(X_, periodic_axes=(0, 1))
from pymks.tools import draw_autocorrelations
correlations = [('black', 'black'), ('white', 'white')]
draw_autocorrelations(X_auto[0], autocorrelations=correlations)
center = (X_auto.shape[1] + 1) / 2
print 'Volume fraction of black phase', X_auto[0, center, center, 0]
print 'Volume fraction of white phase', X_auto[0, center, center, 1]
from pymks.stats import crosscorrelate
X_cross = crosscorrelate(X_, periodic_axes=(0, 1))
from pymks.tools import draw_crosscorrelations
correlations = [('black', 'white')]
draw_crosscorrelations(X_cross[0], crosscorrelations=correlations)
print 'Center value', X_cross[0, center, center, 0]
from pymks.stats import correlate
X_corr = correlate(X_)
from pymks.tools import draw_correlations
correlations = [('black', 'black'), ('white', 'white'), ('black', 'white')]
draw_correlations(X_corr[0].real, correlations=correlations)
print 'Volume fraction of black phase', X_corr[0, center, center, 0]
print 'Volume fraction of white phase', X_corr[0, center, center, 1]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Add class data
Step2: Visualizing high-dimensional data
Step3: t-distributed Stochastic Neighbor Embedding (TSNE)
Step4: Principal component analysis (PCA)
|
<ASSISTANT_TASK:>
Python Code:
# Load data
dat = pd.read_csv("lol_base_stats.tsv", sep="\t")
dat.head()
from bs4 import BeautifulSoup
import requests
primary_role = []
for url in dat.href:
html_data = requests.get(url).text
soup = BeautifulSoup(html_data, "html5lib")
role = soup.find('div', attrs={'class' : 'champion_info'}).table.a.text
primary_role.append(role)
dat["primary_role"] = primary_role
dat.head()
# Save data
dat.to_csv("lol_base_stats-roles.tsv", index=False, sep="\t")
# Define colors
my_colors = ['b', 'r', 'm', 'g', 'k', 'y']
my_colors_key = {
'Controller' : 'b',
'Fighter' : 'r',
'Mage' : 'm',
'Marksman' : 'g',
'Slayer' : 'k',
'Tank' : 'y'
}
plt.rcParams["figure.figsize"] = [10,4]
# How many champions of each type?
dat.groupby(["primary_role"]).count()["Champions"].plot.bar(color=my_colors)
plt.ylabel("count")
plt.xlabel("Primary role (according to wikia's Base champion stats)")
# Use only complete cases
datc = pd.DataFrame.dropna(dat)
datc = datc.iloc[:, 1:-2]
# Plot t-SNE at different perplexities
plt.rcParams["figure.figsize"] = [15,15]
nrows = 4
ncols = 4
fig, ax = plt.subplots(nrows, ncols)
perplexity = list(range(50, 4, -3))
for i in range(nrows):
for j in range(ncols):
p = perplexity.pop()
# Run TSNE
model = TSNE(n_components=2, perplexity=p, random_state=0)
X = model.fit_transform(datc)
xlabel = "TNSE1"
ylabel = "TNSE2"
for k in my_colors_key.keys():
X_subset = X[dat.dropna()["primary_role"] == k,]
x = X_subset[:,0]
y = X_subset[:,1]
ax[i,j].scatter(x, y, color=my_colors_key[k])
ax[i,j].title.set_text("perplexity = {}".format(p))
ax[i,j].set(xlabel=xlabel, ylabel=ylabel)
plt.rcParams["figure.figsize"] = [6,6]
fig, ax = plt.subplots(1, 1)
# Run PCA
pca = PCA(n_components=2)
pca.fit(datc)
X = pca.transform(datc)
xlabel = "PC1"
ylabel = "PC2"
for k in my_colors_key.keys():
X_subset = X[dat.dropna()["primary_role"] == k,]
x = X_subset[:,0]
y = X_subset[:,1]
ax.scatter(x, y, color=my_colors_key[k])
ax.set(xlabel=xlabel, ylabel=ylabel)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cmcc', 'cmcc-esm2-hr5', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
# BUG FIX: this cell duplicated the prescribed_fields_mmr id above; per the
# document TOC (9.4 Mmr / 9.5 Aod) this second cell is the AOD property.
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_aod')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use the self-written function to assess the fit
Step2: Returns the evaluators by self-written functions (we first fit HT+CL)
Step3: Plot a diagram of the evaluators change with alphas
Step4: We want to know the selected coefficients for the training data
Step5: Now we try doing the cooling and heating separately
|
<ASSISTANT_TASK:>
Python Code:
imp_bi = Imputer(missing_values='NaN', strategy='most_frequent', axis = 0)
imp_bi.fit(Predictor[:,bi_no_index])
Predictor[:,bi_no_index] = imp_bi.transform(Predictor[:,bi_no_index])
imp_num = Imputer(missing_values='NaN', strategy='median', axis = 0)
imp_num.fit(Predictor[:,numeric_index])
Predictor[:,numeric_index] = imp_num.transform(Predictor[:,numeric_index])
Predictor.shape
imp_tar = Imputer(missing_values = 'NaN', strategy = 'median', axis = 0)
imp_tar.fit(Target)
Target = imp_tar.transform(Target)
Target.shape
trainPredictor = Predictor[0:len(Predictor)//2]
testPredictor = Predictor[len(Predictor)//2: len(Predictor)]
trainTarget = Target[0:len(Target)//2]
testTarget = Target[len(Target)//2:len(Target)]
print(len(trainPredictor) + len(testPredictor) == len(Predictor))
print(len(trainTarget) + len(testTarget) == len(Target))
print(len(trainPredictor) == len(trainTarget))
print(len(testPredictor) == len(testTarget))
print(trainPredictor.shape)
print(trainTarget.shape)
# Return the coefficient vector produced by a Lasso fit at the given penalty.
def lasso_fit(alpha, predictor, Target):
    """Fit sklearn's Lasso with penalty `alpha` on (predictor, Target)
    and return the fitted coefficient vector (.coef_)."""
    model = linear_model.Lasso(alpha=alpha)
    model.fit(predictor, Target)
    return model.coef_
# Fit a Lasso model on the training data and return its predictions on the
# test inputs.
def lasso_results(alpha_input, train_X, train_y, test_X, test_y):
    """Fit Lasso(alpha=alpha_input) on (train_X, train_y) and predict test_X.

    test_y is accepted for signature symmetry with the other helpers but is
    not used here.
    """
    clf = linear_model.Lasso(alpha=alpha_input)
    clf.fit(train_X, train_y)
    # clf.predict computes test_X @ coef_ + intercept_, which is exactly the
    # ones-column design-matrix product the original built by hand.
    return clf.predict(test_X)
# Evaluate Lasso fits over a range of alphas on held-out data.
def lasso_test(alpha_input, train_X, train_y, test_X, test_y, testWeight):
    """For each alpha, fit on the training split and score on the test split.

    Prints the r-square, MSE, and alpha of the best (lowest weighted MSE)
    fit, and returns an (n_alphas, 2) array of [r_square, mse] rows.
    """
    r_square = []
    mse = []
    for a in alpha_input:
        testResults = lasso_results(a, train_X, train_y, test_X, test_y)
        r_square.append(r2_score(test_y, testResults, sample_weight=testWeight))
        mse.append(mean_squared_error(test_y, testResults, sample_weight=testWeight))
    # Single pass instead of min() followed by a second index() scan.
    index = int(np.argmin(mse))
    print(r_square[index])
    print(mse[index])
    print(alpha_input[index])
    return np.stack((r_square, mse), axis=1)
# Cross-validated Lasso fit: select alpha by 5-fold CV and plot the MSE paths.
def lasso_cros(alpha_input, train_X, train_Y, test_X, test_Y):
    """Run 5-fold LassoCV over the alpha_input grid on the training data.

    test_X / test_Y are accepted for interface symmetry with the other
    helpers but are unused. Returns a dict with the selected 'alpha', the
    training 'r_square', and the fitted 'intercept'.
    """
    # BUG FIX: the original passed the module-level global `alpha_range`
    # here, silently ignoring the alpha_input argument.
    clf2 = LassoCV(alphas=alpha_input, cv=5)
    clf2.fit(train_X, train_Y)
    print(clf2.score(train_X, train_Y))
    print(clf2.alpha_)
    print(clf2.mse_path_.shape)
    print(clf2.intercept_)
    # Per-fold MSE curves (dashed) plus their mean (solid black) vs log-alpha.
    plt.figure(figsize=(15, 10))
    plt.plot(np.log10(clf2.alphas_), clf2.mse_path_[:], '--')
    plt.plot(np.log10(clf2.alphas_), clf2.mse_path_.mean(axis=-1), 'k-')
    plt.show()
    return {'alpha': clf2.alpha_,
            'r_square': clf2.score(train_X, train_Y),
            'intercept': clf2.intercept_}
# Report which coefficients a (Lasso) fit kept, i.e. the non-zero entries.
def find_features(coeff):
    """Return the positions of non-zero coefficients and how many there are.

    coeff: sequence of fitted coefficients (e.g. a Lasso .coef_ vector).
    Returns {'index': [positions of non-zero entries], 'count': len(index)}.
    """
    index = [i for i, c in enumerate(coeff) if c != 0]
    return {'index': index, 'count': len(index)}
# Count how many features Lasso keeps (non-zero coefficients) at each alpha.
def num_feature(alpha_input, train_X, train_y):
    """Return a list with the number of selected features per alpha.

    NOTE: the original iterated `for alpha_input in alpha_input`, shadowing
    its own parameter, and its local list shadowed the function name; both
    renamed here.
    """
    counts = []
    for a in alpha_input:
        clf = linear_model.Lasso(alpha=a)
        clf.fit(train_X, train_y)
        counts.append(find_features(clf.coef_)["count"])
    return counts
alpha_range = np.logspace(-5, 5, num = 1000, base = 10)
evaluator = lasso_test(alpha_range, trainPredictor, trainTarget[:,4], testPredictor, testTarget[:,4], testTarget[:,5])
#and number of features selected from given alpha range
num_feature = num_feature(alpha_range, trainPredictor, trainTarget[:,4])
#Plot the results
plt.figure(figsize=(8,10))
plt.subplot(211)
plt.plot(np.log10(np.logspace(-5, 5, num = 100, base =10)), evaluator[:,1], 'k-')
plt.ylabel("Mean Square Error")
plt.xlabel("log10(alpha)")
plt.title("Change of Mearn Square Error with Tuning Parameter")
plt.subplot(212)
plt.plot(np.log10(np.logspace(-5, 5, num = 100, base =10)), num_feature, 'k-')
plt.ylabel("Number of features selected")
plt.xlabel("log10(alpha)")
plt.title("Change of Number of features selected with Tuning Parameter")
plt.show()
lasso_cros(alpha_range, trainPredictor, trainTarget[:,4], testPredictor, testTarget[:,4])
coefficient_to = lasso_fit(0.094183315346479515, trainPredictor, trainTarget[:,4])
features = find_features(coefficient_to)
testResults_total = lasso_results(0.17475284000076829, trainPredictor, trainTarget[:,4], testPredictor, testTarget[:,4])
fig2= plt.figure(figsize=(20, 10))
plt.plot(list(range(len(testResults_total))), testResults_total, 'ko')
plt.plot(list(range(len(testResults_total))), testTarget[:,4], 'ro')
plt.ylim(-20,500)
plt.xlim(0,len(testResults_total))
print(testTarget[:,4])
to_feature_index = find_features(coefficient_to)["index"]
code[to_feature_index, 1]
def feature_reduc(alpha_input, train_X, train_y, threshold):
    """Grow the Lasso penalty until at most `threshold` features survive.

    Repeatedly fits Lasso, multiplying alpha by 1.2 while more than
    `threshold` coefficients remain non-zero. Returns
    {'alpha': penalty that actually produced the selection,
     'feature_index': indices of the surviving features}.
    """
    feature_index = list(range(len(train_X[0])))
    while len(feature_index) > threshold:
        clf = linear_model.Lasso(alpha=alpha_input)
        clf.fit(train_X, train_y)
        feature_index = find_features(clf.coef_)["index"]
        if len(feature_index) > threshold:
            # Still too many features: strengthen the penalty and retry.
            alpha_input = alpha_input * 1.2
    # BUG FIX: the original bumped alpha *after* the final fit as well, so
    # the returned 'alpha' was 1.2x the value that produced feature_index.
    return {'alpha': alpha_input, 'feature_index': feature_index}
# Target 1 most important feature
code[feature_reduc(10, Predictor, Target[:,0],1)["feature_index"],1]
# Target 3 most important feature
code[feature_reduc(10, Predictor, Target[:,0],3)["feature_index"],1]
# Target 5 most important feature
code[feature_reduc(10, Predictor, Target[:,0],5)["feature_index"],1]
# Target 10 most important feature
code[feature_reduc(10, Predictor, Target[:,0],10)["feature_index"],1]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For this example notebook, we'll be using the elevators UCI dataset used in the paper. Running the next cell downloads a copy of the dataset that has already been scaled and normalized appropriately. For this notebook, we'll simply be splitting the data using the first 80% of the data as training and the last 20% as testing.
Step2: Defining the SGPR Model
Step3: Training the model
Step4: Making Predictions
|
<ASSISTANT_TASK:>
Python Code:
import math
import torch
import gpytorch
from matplotlib import pyplot as plt
# Make plots inline
%matplotlib inline
import urllib.request
import os
from scipy.io import loadmat
from math import floor
# this is for running the notebook in our testing framework
smoke_test = ('CI' in os.environ)
if not smoke_test and not os.path.isfile('../elevators.mat'):
print('Downloading \'elevators\' UCI dataset...')
urllib.request.urlretrieve('https://drive.google.com/uc?export=download&id=1jhWL3YUHvXIaftia4qeAyDwVxo6j1alk', '../elevators.mat')
if smoke_test: # this is for running the notebook in our testing framework
X, y = torch.randn(1000, 3), torch.randn(1000)
else:
data = torch.Tensor(loadmat('../elevators.mat')['data'])
X = data[:, :-1]
X = X - X.min(0)[0]
X = 2 * (X / X.max(0)[0]) - 1
y = data[:, -1]
train_n = int(floor(0.8 * len(X)))
train_x = X[:train_n, :].contiguous()
train_y = y[:train_n].contiguous()
test_x = X[train_n:, :].contiguous()
test_y = y[train_n:].contiguous()
if torch.cuda.is_available():
train_x, train_y, test_x, test_y = train_x.cuda(), train_y.cuda(), test_x.cuda(), test_y.cuda()
X.size()
from gpytorch.means import ConstantMean
from gpytorch.kernels import ScaleKernel, RBFKernel, InducingPointKernel
from gpytorch.distributions import MultivariateNormal
class GPRegressionModel(gpytorch.models.ExactGP):
    """Exact GP regression model with an inducing-point (SGPR-style) kernel.

    Constant mean; scaled RBF base kernel approximated through the first
    500 training inputs used as inducing points.
    """

    def __init__(self, train_x, train_y, likelihood):
        super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
        self.mean_module = ConstantMean()
        base_kernel = ScaleKernel(RBFKernel())
        self.base_covar_module = base_kernel
        self.covar_module = InducingPointKernel(
            base_kernel,
            inducing_points=train_x[:500, :],
            likelihood=likelihood,
        )

    def forward(self, x):
        """Return the GP prior as a MultivariateNormal at inputs x."""
        return MultivariateNormal(self.mean_module(x), self.covar_module(x))
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = GPRegressionModel(train_x, train_y, likelihood)
if torch.cuda.is_available():
model = model.cuda()
likelihood = likelihood.cuda()
training_iterations = 2 if smoke_test else 50
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
def train():
for i in range(training_iterations):
# Zero backprop gradients
optimizer.zero_grad()
# Get output from model
output = model(train_x)
# Calc loss and backprop derivatives
loss = -mll(output, train_y)
loss.backward()
print('Iter %d/%d - Loss: %.3f' % (i + 1, training_iterations, loss.item()))
optimizer.step()
torch.cuda.empty_cache()
# See dkl_mnist.ipynb for explanation of this flag
%time train()
model.eval()
likelihood.eval()
with gpytorch.settings.max_preconditioner_size(10), torch.no_grad():
with gpytorch.settings.max_root_decomposition_size(30), gpytorch.settings.fast_pred_var():
preds = model(test_x)
print('Test MAE: {}'.format(torch.mean(torch.abs(preds.mean - test_y))))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Search patterns
Step2: Search and capture patterns
Step3: Case insensitive search
Step4: Without re.compile()
Step5: With re.compile()
|
<ASSISTANT_TASK:>
Python Code:
s = "Maison 3 pièce(s) - 68.05 m² - 860 € par mois charges comprises"
re.findall(r'\d+\.?\d*', s)
re.findall(r'\b\d+\.?\d*\b', s)
s = "Maison 3 pièce(s) - 68.05 m² - 860 € par mois charges comprises"
if re.search(r'Maison', s):
print("Found!")
else:
print("Not found!")
if re.search(r'Appartement', s):
print("Found!")
else:
print("Not found!")
if re.match(r'Maison', s):
print("Found!")
else:
print("Not found!")
s = "Maison 3 pièce(s) - 68.05 m² - 860 € par mois charges comprises"
m = re.search(r'\b(\d+) pièce', s)
if m:
print(int(m.group(1)))
else:
print("Not found!")
m = re.search(r'\b(\d+\.?\d*) m²', s)
if m:
print(float(m.group(1)))
else:
print("Not found!")
m = re.search(r'\b(\d+\.?\d*) €', s)
if m:
print(float(m.group(1)))
else:
print("Not found!")
s = "Maison 3 PIÈce(s) - 68.05 m² - 860 € par mois charges comprises"
m = re.search(r'\b(\d+) pièce', s, re.IGNORECASE)
if m:
print(int(m.group(1)))
else:
print("Not found!")
num_pieces = re.compile(r'\b(\d+) pièce', re.IGNORECASE)
m = num_pieces.search(s)
if m:
print(int(m.group(1)))
else:
print("Not found!")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Proportional oversampling
Step2: If the event in a classification problem or the value in a prediction problem is imbalanced (usually toward zero) this can lead to biased models, single class predictions for classification models, and biased predictions for prediction models. The simplest approach for an imbalanced target is to oversample the data set.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd # pandas for handling mixed data sets
import numpy as np # numpy for basic math and matrix operations
# imbalanced-learn for oversampling
from imblearn.over_sampling import RandomOverSampler
scratch_df = pd.DataFrame({'x': pd.Series(np.arange(0, 10)),
'y': [0, 1, 0, 0, 0, 0, 1, 0, 0, 0]})
scratch_df
# fit random oversampling function
# cannot pass single array for X, must use numpy.reshape(-1, 1)
ros = RandomOverSampler()
over_sample_x, over_sample_y = ros.fit_sample(scratch_df.x.get_values().reshape(-1, 1),
scratch_df.y)
# create Pandas dataframe from oversampling results
over_sample_df = pd.DataFrame({'over_sample_x': over_sample_x.reshape(16,),
'over_sample_y': over_sample_y})
over_sample_df
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Display the PMF.
Step3: Define <tt>BiasPmf</tt>.
|
<ASSISTANT_TASK:>
Python Code:
kids = resp['numkdhh']
kids
pmf = thinkstats2.Pmf(kids)
thinkplot.Pmf(pmf, label='PMF')
thinkplot.Show(xlabel='# of Children', ylabel='PMF')
def BiasPmf(pmf, label=''):
Returns the Pmf with oversampling proportional to value.
If pmf is the distribution of true values, the result is the
distribution that would be seen if values are oversampled in
proportion to their values; for example, if you ask students
how big their classes are, large classes are oversampled in
proportion to their size.
Args:
pmf: Pmf object.
label: string label for the new Pmf.
Returns:
Pmf object
new_pmf = pmf.Copy(label=label)
for x, p in pmf.Items():
new_pmf.Mult(x, x)
new_pmf.Normalize()
return new_pmf
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Try writing a message to the OLED.
Step2: Import the ALS library, create an instance of the ALS Pmod, and read the value from the sensor.
Step3: Write the value from the ALS to the OLED. The ALS sensor returns an 8-bit value.
|
<ASSISTANT_TASK:>
Python Code:
from pynq import Overlay
from pynq.iop import Pmod_OLED
from pynq.iop import PMODB
ol = Overlay("base.bit")
ol.download()
oled = Pmod_OLED(PMODB)
oled.write("Hello World")
oled.clear()
from pynq.iop import Pmod_ALS
from pynq.iop import PMODA
als = Pmod_ALS(PMODA)
als.read()
oled.write("Light value : " + str(als.read()))
import time
from pynq.iop import Pmod_ALS
from pynq.iop import PMODA
als = Pmod_ALS(PMODA)
als.set_log_interval_ms(100)
als.start_log()
time.sleep(1)
als.stop_log()
als.get_log()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Task 5.1. Apply Girvan-Newman method
Step2: Apply available Girvan-Newman algorithm and compare results
|
<ASSISTANT_TASK:>
Python Code:
# Import python-igraph library
import igraph
from IPython.display import Image
# Note: email graph is too large for the fast execution of the Girvan-Newman method, so we use karate graph,
# which is available on github and was taken from http://www.cise.ufl.edu/research/sparse/matrices/Newman/karate.html
gname = 'karate'
with open(gname + '.ncol', 'r') as finp:
g = igraph.Graph.Read_Ncol(finp, weights=False, directed=False)
igraph.summary(g)
# Visualize the input graph into karate.png
gimg = gname + '.png'
igraph.plot(g, target=gimg)
# Show the visualization
Image(filename=gimg)
# Cut dendogram at the level, which maximizes modularity (evaluated automatically)
vdr = g.community_edge_betweenness()
# Get clusters from the dendogram
vcs = vdr.as_clustering()
def printCommuns(vcs, aname):
'''Print resulting communities
vcs - communities as the VertexClustering object
aname - name of the algorithm
'''
# Evaluate the number of detected communities (clusters) and their sizes
csizes = vcs.sizes()
# Evaluate resulting modularity
Q = vcs.recalculate_modularity()
# Show results
print("Using {} clustering '{}' graph has modularity Q={}and contains {} communities of sizes: {}"
.format(gname, aname, Q, len(csizes), ', '.join([str(sz) for sz in csizes])))
def visualizeCommuns(g, vcs, aname):
'''Visualize communities
g - the graph to be visualized
vcs - communities as the VertexClustering object
aname - name of the algorithm
return - visualization of communities on the graph
'''
# Define distinct colors for the communities
colors = ['red', 'yellow', 'blue', 'green', 'purple', 'cyan', 'black']
# Assign colors to each vertex according to the cluster
for icl, cl in enumerate(vcs):
for v in cl:
g.vs[v]['color'] = colors[icl]
# Transform algorithm name to the file prefix
fpref = '_'.join(aname.lower().split('-'))
# Visualize detected communities on the input graph
cgnimg = fpref + '_' + gname + ".png"
print(cgnimg)
igraph.plot(g, target=cgnimg) # , vertex_size=6
return Image(cgnimg)
# Show results
aname = 'Girvan-Newman'
printCommuns(vcs, aname)
visualizeCommuns(g, vcs, aname)
# Get communities (clusters) corresponding to the best modularity (top level of the built hierarchy)
vcs = g.community_multilevel()
# Show results
aname = 'Louvain'
printCommuns(vcs, aname)
visualizeCommuns(g, vcs, aname)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Данные были взяты из репозитория UCI Machine Learning Repository по адресу http
Step2: В исследуемых данных мы имеем следующее число точек
Step3: Загруженные данные разбиваем на две выборки
Step4: В обучающей выборке имеем столько наблюдений
Step5: Рассмотрим SVM в линейно неразделимом случае с $L^1$ нормой на зазоры $(\xi_i){i=1}^n$
Step6: Параметры вида ядра (и соответственно отображений признаков $\phi
Step7: полимониальное
Step8: и линейное (в $\mathbb{R}^d$)
Step9: Результаты поиска приведены ниже
Step10: Посмотрим точность лучших моделей в каждом классе ядер на тестовтй выборке.
Step11: Гауссовское ядро
Step12: Полимониальное ядро
Step13: Построим ROC-AUC кривую для лучшей моделей.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np, pandas as pd
import matplotlib.pyplot as plt
from sklearn import *
%matplotlib inline
random_state = np.random.RandomState( None )
def collect_result( grid_, names = None ) :
    """Collect grid-search CV results into a DataFrame indexed by the parameters.

    Parameters
    ----------
    grid_ : fitted grid-search object exposing ``grid_scores_`` -- an iterable
        of (parameter dict, mean score, per-fold scores) triples.
    names : list of str, optional
        Level names for the resulting MultiIndex (one per grid parameter).

    Returns
    -------
    pandas.DataFrame with columns "1-Точность" (mean CV score) and
    "2-Отклонение" (std. deviation of the fold scores), sorted by index.
    """
    ## Avoid the mutable default argument of the original signature.
    names = list( names ) if names else None
    scores = list( grid_.grid_scores_ )
    df = pd.DataFrame( { "2-Отклонение" : [ np.std( v_[ 2 ] ) for v_ in scores ],
                         "1-Точность" : [ v_[ 1 ] for v_ in scores ], },
        index = pd.MultiIndex.from_tuples(
            ## tuple() makes this work whether .values() returns a list (Py2)
            ## or a dict view (Py3).
            [ tuple( v_[ 0 ].values() ) for v_ in scores ],
            names = names ) )
    ## BUG FIX: sort_index() returns a new frame -- the original called it
    ## and discarded the result, returning the unsorted frame.
    return df.sort_index( )
df = pd.read_csv( 'data_banknote_authentication.txt', sep = ",", decimal = ".", header = None,
names = [ "variance", "skewness", "curtosis", "entropy", "class" ] )
y = df.xs( "class", axis = 1 )
X = df.drop( "class", axis = 1 )
print len( X )
X_train, X_test, y_train, y_test = cross_validation.train_test_split( X, y, test_size = 0.60,
random_state = random_state )
print len( X_train )
svm_clf_ = svm.SVC( probability = True, max_iter = 100000 )
## Вид ядра : Гауссовское ядро
grid_rbf_ = grid_search.GridSearchCV( svm_clf_, param_grid = {
## Параметр регуляризции: C = 0.0001, 0.001, 0.01, 0.1, 1, 10.
"C" : np.logspace( -4, 1, num = 6 ),
"kernel" : [ "rbf" ],
## Параметр "концентрации" Гауссовского ядра
"gamma" : np.logspace( -2, 2, num = 10 ),
}, cv = 5, n_jobs = -1, verbose = 0 ).fit( X_train, y_train )
df_rbf_ = collect_result( grid_rbf_, names = [ "Ядро", "C", "Параметр" ] )
## Вид ядра : Полиномиальное ядро
grid_poly_ = grid_search.GridSearchCV( svm.SVC( probability = True, max_iter = 20000, kernel = "poly" ), param_grid = {
## Параметр регуляризции: C = 0.0001, 0.001, 0.01, 0.1, 1, 10.
"C" : np.logspace( -4, 1, num = 6 ),
"kernel" : [ "poly" ],
## Степень полиномиального ядра
"degree" : [ 2, 3, 5, 7 ],
}, cv = 5, n_jobs = -1, verbose = 0 ).fit( X_train, y_train )
df_poly_ = collect_result( grid_poly_, names = [ "Ядро", "C", "Параметр" ] )
## Вид ядра : линейное ядро
grid_linear_ = grid_search.GridSearchCV( svm_clf_, param_grid = {
## Параметр регуляризции: C = 0.0001, 0.001, 0.01, 0.1, 1, 10.
"C" : np.logspace( -4, 1, num = 6 ),
"kernel" : [ "linear" ],
"degree" : [ 0 ]
}, cv = 5, n_jobs = -1, verbose = 0 ).fit( X_train, y_train )
df_linear_ = collect_result( grid_linear_, names = [ "Ядро", "C", "Параметр" ] )
pd.concat( [ df_linear_, df_poly_, df_rbf_ ], axis = 0 ).sort_index( )
print grid_linear_.best_estimator_
print "Accuracy: %0.3f%%" % ( grid_linear_.best_estimator_.score( X_test, y_test ) * 100, )
print grid_rbf_.best_estimator_
print "Accuracy: %0.3f%%" % ( grid_rbf_.best_estimator_.score( X_test, y_test ) * 100, )
print grid_poly_.best_estimator_
print "Accuracy: %0.3f%%" % ( grid_poly_.best_estimator_.score( X_test, y_test ) * 100, )
result_ = { name_: metrics.roc_curve( y_test, estimator_.predict_proba( X_test )[:,1] )
for name_, estimator_ in {
"Linear": grid_linear_.best_estimator_,
"Polynomial": grid_poly_.best_estimator_,
"RBF": grid_rbf_.best_estimator_ }.iteritems( ) }
fig = plt.figure( figsize = ( 16, 9 ) )
ax = fig.add_subplot( 111 )
ax.set_ylim( -0.1, 1.1 ) ; ax.set_xlim( -0.1, 1.1 )
ax.set_xlabel( "FPR" ) ; ax.set_ylabel( u"TPR" )
ax.set_title( u"ROC-AUC" )
for name_, value_ in result_.iteritems( ) :
fpr, tpr, _ = value_
ax.plot( fpr, tpr, lw=2, label = name_ )
ax.legend( loc = "lower right" )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 区间缩放法
Step2: 标准化与归一化的区别
|
<ASSISTANT_TASK:>
Python Code:
iris.data
from sklearn.preprocessing import StandardScaler
# 标准化, 返回值为标准化后的数据
iris_standard = StandardScaler().fit_transform(iris.data)
from sklearn.preprocessing import MinMaxScaler
#区间缩放,返回值为缩放到[0, 1]区间的数据
iris_minmax = MinMaxScaler().fit_transform(iris.data)
from sklearn.preprocessing import Normalizer
#归一化,返回值为归一化后的数据
iris_norm = Normalizer().fit_transform(iris.data)
iris_norm
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <p style = "font-size
Step2: Preamble
Step3: EM and MNIST
Step4: Classify using the maximum aposteriori rule.
Step5: A procedure to compute the log-likelihood of each observaton with respect to each mixture component. Used in the posterior computation.
Step6: The actual procedure for computing the E*-step
Step7: Analytic solution
Step8: A wrapper to match the assignment specifications.
Step9: A it has been mentioned eariler, the EM algorithm switches between E and M steps until convergence.
Step10: The procedure above actually invokes the true EM core, defined below.
Step11: Define a convenient procedure for running experiments. By setting relative error to zero the algorithm is forced to exhaust all the allocated iterations.
Step12: Miscellanea
Step13: The folowing pair of procedures are used to plot the digits in a clear manner. The first one just creates a canvas for the image
Step14: This procedure displays the images on a nice plot. Used for one-line visualization.
Step15: MIscellanea
Step17: Define a function that produces (using ffmpeg) and embeds a video in HTML into IPython
Step18: Miscellanea
Step19: Or obtain the data from the provided CSV files.
Step20: Study
Step21: Case
Step22: They do indeed look quite distinct. Now collect them into a single dataset and estimate the model.
Step23: The estimate deltas show that the EM algorithm's E-step actually transfers the unlikely observations between classes, as is expected by constructon of the algorithm.
Step24: Now let's see how well the EM algorithm performs on a model with more classes. But before that let's have a look at a random sample of the handwritten digits.
Step25: Case
Step26: Run the procedure that performs the EM algorithm and returns the history of the parameter estimates as well as the dynamics of the log-likelihood lower bound.
Step27: One can clearly see that $50$ iterations were not enough for the algorithm to converge
Step28: Let's see if changing $K$ does the trick.
Step29: For what values of $K$ was it possible to infer the templates of all digits?
Step30: Obviously, the model with more mixture components is more likely to produce "templates" for all digits. For larger $K$ this is indeed the case.
Step31: As one can see, increasing the number of iterations does not necessarily improve the results.
Step32: Judging by the plot of the log-likelihood, the fact that the EM is guaranteed to converge to local maxima and does so extremely fast, there was no need for more than 120-130 iterations. The chages in the log-likelihood around that number of iterations are of the order $10^{-4}$. Since we are working in finite precision arithmetic (double), the smallest precision is $\approx 10^{-14}$.
Step33: The parameter estimates of the EM stabilize pretty quickly. In fact most templates stabilize by iterations 100-120.
Step34: Indeed the log-likelihood does not decrease with $K$ on average. Nevertheless the model with the highes likelihood turs out to have this many mixture components
Step35: A nice, yet expected coincidence
Step36: ... and get the posterior mixture component probabilities.
Step37: Use a simple majority rule to automatically assign lables to templates.
Step38: Assign the labels $l$ to templates $t$ according to their score, based on the average of the top-$5$ log-likelihoods of observations with label $l$ and classified with template $t$.
Step39: Compare the label assignments. Here are the templates.
Step40: These are the templates, which were assigned different labels by the majority and "trust" methods.
Step41: Here are the pictures of templates ordered according to their label.
Step42: Classification
Step43: Let's see the best template for each test observation in some sub-sample.
Step44: The digits are shown in pairs
Step45: Not surprisingly, majority- and likelihood-based classification accuracies are close.
Step46: </hr>
Step47: Accuracy of $K=20$
Step48: Accuracy of $K=15$
Step49: Accuracy of $K=10$
Step50: As one can see, the test-sample accuracy of the model falls dramatically for a smaller number of mixture components. This was expected since, for various reasons — one being that the data is handwritten — it is highly unlikely that a single digit would have only one template.
Step51: <br/><p style="font-size
Step52: <hr/>
|
<ASSISTANT_TASK:>
Python Code:
## Add JS-based table of contents
from IPython.display import HTML as add_TOC
add_TOC( u<h1 id="tocheading">Table of Contents</h1></br><div id="toc"></div>
<script src="https://kmahelona.github.io/ipython_notebook_goodies/ipython_notebook_toc.js"></script></br></hr></br> )
import os, time as tm, warnings
warnings.filterwarnings( "ignore" )
# from IPython.core.display import HTML
from IPython.display import display, HTML
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed( 569034853 )
## This is the correct way to use the random number generator,
## since it allows finer control.
rand = np.random.RandomState( np.random.randint( 0x7FFFFFFF ) )
## A bunch of wrappers to match the task specifications
def posterior( x, clusters ) :
    """Posterior mixture-component probabilities under uniform component weights.

    Parameters
    ----------
    x : (n_samples, n_pixels) binary array of observations.
    clusters : (K, n_pixels) array of per-component Bernoulli parameters.

    Returns
    -------
    (n_samples, K) array of p(z = k | x_s).
    """
    ## Uniform prior over the K mixture components. The builtin float replaces
    ## np.float: that alias was removed in NumPy 1.24 (behavior is identical).
    pi = np.ones( clusters.shape[ 0 ], dtype = float ) / clusters.shape[ 0 ]
    q, ll = __posterior( x, theta = clusters, pi = pi )
    return q
## The likelihood is a byproduct of the E-step's minimization of Kullback-Leibler
def likelihood( x, clusters ) :
    """Total log-likelihood of the data under uniform component weights.

    Parameters
    ----------
    x : (n_samples, n_pixels) binary observations.
    clusters : (K, n_pixels) per-component Bernoulli parameters.

    Returns
    -------
    Scalar sum of the per-observation log-likelihoods log p(x_s).
    """
    ## Uniform prior; builtin float replaces np.float (removed in NumPy 1.24).
    pi = np.ones( clusters.shape[ 0 ], dtype = float ) / clusters.shape[ 0 ]
    q, ll = __posterior( x, theta = clusters, pi = pi )
    return np.sum( ll )
## Classifier
def classify( x, theta, pi = None ) :
    """Assign each observation to the maximum-a-posteriori mixture component.

    Parameters
    ----------
    x : (n_samples, n_pixels) binary observations.
    theta : (K, n_pixels) per-component Bernoulli parameters.
    pi : (K,) component weights; uniform when omitted.

    Returns
    -------
    c_s : (n_samples,) index of the MAP component for each observation.
    q_sk : (n_samples, K) posterior component probabilities.
    ll_s : (n_samples, 1) per-observation log-likelihoods.
    """
    ## Default to a uniform prior over components; builtin float replaces
    ## np.float (alias removed in NumPy 1.24, behavior unchanged).
    pi = pi if pi is not None else np.ones( theta.shape[ 0 ], dtype = float ) / theta.shape[ 0 ]
    ## Compute the posterior probabilities of the data
    q_sk, ll_s = __posterior( x, theta = theta, pi = pi )
    ## Classify according to max posterior:
    c_s = np.argmax( q_sk, axis = 1 )
    return c_s, q_sk, ll_s
def __component_likelihood( x, theta ) :
    """Log-likelihood of every observation under every Bernoulli mixture component.

    Parameters
    ----------
    x : (n_samples, n_pixels) array; entries > 0 are treated as "on" pixels.
    theta : (K, n_pixels) array of per-component pixel "on" probabilities.

    Returns
    -------
    (n_samples, K) array ll_sk with ll_sk[s, k] = log p(x_s | component k).
    """
    ## Negative machine zeros would poison np.log with NaNs, which is why
    ## the parameter array is restricted to [0, 1] first.
    theta_clipped = np.clip( theta, 0.0, 1.0 )
    ## One column of log-likelihoods per mixture component.
    ## (float / range replace np.float / xrange: identical behavior on
    ## Python 2, and also valid on Python 3 / NumPy >= 1.24.)
    ll_sk = np.zeros( ( x.shape[ 0 ], theta.shape[ 0 ] ), dtype = float )
    ## Binary mask of "on" pixels.
    mask = x > 0
    for k in range( theta.shape[ 0 ] ) :
        ## log p(x | theta_k) = sum_j [ x_j log theta_kj + (1 - x_j) log(1 - theta_kj) ];
        ## np.where picks the right term instead of using the power form.
        ll_sk[ :, k ] = np.sum( np.where( mask,
                np.log( theta_clipped[ k ] ), np.log( 1 - theta_clipped[ k ] ) ), axis = ( 1, ) )
    return ll_sk
## The core procedure for computing the conditional density of classes
def __posterior( x, theta, pi ) :
    """E-step workhorse: posterior component probabilities and log-likelihoods.

    Uses the log-sum-exp trick (subtracting the per-row maximum) so that the
    normalization stays numerically stable even for tiny likelihoods.

    Parameters
    ----------
    x : (n_samples, n_pixels) binary observations.
    theta : (K, n_pixels) per-component Bernoulli parameters.
    pi : (K,) prior component probabilities.

    Returns
    -------
    q_sk : (n_samples, K) posterior probabilities p(z = k | x_s).
    ll_s : (n_samples, 1) per-observation log-likelihood log p(x_s).
    """
    ## Get the log-likelihoods of each observation in each mixture component.
    ll_sk = __component_likelihood( x, theta )
    ## Find the largest unnormalized probability.
    llstar_s = np.reshape( np.max( ll_sk, axis = ( 1, ) ), ( ll_sk.shape[ 0 ], 1 ) )
    ## Subtract the largest exponent
    ll_sk -= llstar_s
    ## In the rare case when the largest exponent is -Inf, force the differences
    ## to zero. This effectively treats such observations as having uniform likelihood
    ## across classes. This way the priors don't get masked by really small numbers.
    ## I could've used ``np.nan_to_num( ll_sk - llstar_s )'' but it actually copies
    ## the ll_sk array.
    ll_sk[ np.isnan( ll_sk ) ] = 0.0
    ## Don't forget to add the log-prior probability (Numpy broadcasting applies!).
    ## Adding priors before dealing with infinities would mask them and yield
    ## incorrect estimates of the log-likelihoods!
    ll_sk += np.log( np.reshape( pi, ( 1, ll_sk.shape[ 1 ] ) ) )
    ## Compute the log-sum-exp of the individual log-likelihoods. Negative infinities
    ## resolve to 0.0 while the largest exponent resolves to a one. This step cannot
    ## produce NaNs
    ll_s = np.reshape( np.log( np.sum( np.exp( ll_sk ), axis = ( 1, ) ) ), ( ll_sk.shape[ 0 ], 1 ) )
    ## The sum-exp could never be anything lower than 1, since at least one
    ## element of each row of ll_sk has to be lstar_s, whence the respective
    ## difference should be zero and the exponent -- 1. Thus even if the
    ## rest of the sum is close to machine zero, the logarithm would still
    ## return 0.
    ## Normalise the likelihoods to get conditional probability, and compute
    ## the sum of the log-denominator, which is the log-likelihood.
    return np.exp( ll_sk - ll_s ), ll_s + llstar_s
## M-step: closed-form maximizers of the expected complete-data log-likelihood
## under the current posterior responsibilities from the E-step.
def __learn_clusters( x, z ) :
    """M-step parameter update for the Bernoulli mixture.

    Parameters
    ----------
    x : (n_samples, n_pixels) binary observations.
    z : (n_samples, K) posterior component responsibilities.

    Returns
    -------
    theta : (K, n_pixels) responsibility-weighted pixel frequencies.
    pi : (K,) average responsibility mass of each component.
    """
    ## Total responsibility mass attracted by each component.
    weight = z.sum( axis = 0 )
    ## Responsibility-weighted average of the observations, one row per component.
    theta = np.dot( z.T, x ) / weight[ :, np.newaxis ]
    ## No clipping here: any regularization belongs in the E-step.
    return theta, weight / x.shape[ 0 ]
## Thin wrapper matching the assignment API.
def learn_clusters( x, z ) :
    """M-step wrapper that returns only the Bernoulli parameters.

    The mixture weights are fixed (uniform) in the conditional model, so the
    pi estimate produced by the M-step is intentionally discarded here.
    """
    theta, _unused_pi = __learn_clusters( x, z )
    return theta
## A wrapper for the core em algorithm below
def em_algorithm( x, K, maxiter, verbose = True, rel_eps = 1e-4, full = False ) :
    """Run EM for a K-component Bernoulli mixture on the binary data x.

    Parameters
    ----------
    x : (n_samples, n_pixels) binary observations.
    K : number of mixture components.
    maxiter : maximum number of EM iterations.
    verbose : dump per-iteration diagnostics and plots.
    rel_eps : relative-error convergence tolerance (0 disables the check).
    full : also estimate the mixture weights pi (otherwise kept uniform).

    Returns
    -------
    ``(theta_history, ll_history)``, or ``((theta_history, pi_history),
    ll_history)`` when ``full`` is set.
    """
    ## Initialize the model parameters with uniform [0.25,0.75] random numbers
    ## (``rand`` is the module-level RandomState, so runs are reproducible).
    theta_1 = rand.uniform( size = ( K, x.shape[ 1 ] ) ) * 0.5 + 0.25
    ## NOTE(review): np.float is an alias removed in NumPy >= 1.24.
    pi_1 = None if not full else np.ones( K, dtype = np.float ) / K
    ## Run (and time) the em algorithm
    tick = tm.time( )
    ll, theta, pi, status = __em_algorithm( x, theta_1 = theta_1,
        pi_1 = pi_1, niter = maxiter, rel_eps = rel_eps, verbose = verbose )
    tock = tm.time( )
    print( "total %.3f, %.3f/iter" % ( ( tock - tick ), ( tock - tick ) / len( ll ), ) )
    ## Report non-convergence (nonzero status from the core loop).
    if verbose :
        if status[ 'status' ] != 0 :
            print "Convergence not achieved. %d" % ( status[ 'status' ], )
    ## Return the history of theta (and pi for the full model) and the log-likelihoods
    if full :
        return ( theta, pi ), ll
    return theta, ll
## The core of the EM algorithm
def __em_algorithm( x, theta_1, pi_1 = None, niter = 1000, rel_eps = 1e-4, verbose = True ) :
    """Alternate E- and M-steps until convergence or ``niter`` iterations.

    Parameters
    ----------
    x : (n_samples, n_pixels) binary observations.
    theta_1 : (K, n_pixels) initial Bernoulli parameters.
    pi_1 : (K,) initial mixture weights; None selects the conditional model
        with fixed uniform weights.
    niter : iteration cap.
    rel_eps : relative L^1 convergence tolerance; 0 (or negative) disables the
        check -- the relative errors become NaN, every comparison is False,
        and all ``niter`` iterations are exhausted.
    verbose : dump per-iteration diagnostics and plots.

    Returns
    -------
    (ll_history, theta_history, pi_history, status_dict) where status 0 means
    converged, -1 iteration cap hit, -2 non-finite parameters encountered.
    """
    ## If we were supplied with an initial estimate of the prior distribution,
    ## then assume the full model is needed.
    full_model = pi_1 is not None
    ## If the prior cluster probabilities are not supplied, assume uniform distribution.
    ## NOTE(review): np.float is an alias removed in NumPy >= 1.24.
    pi_1 = pi_1 if full_model else np.ones( theta_1.shape[ 0 ], dtype = np.float ) / theta_1.shape[ 0 ]
    ## Allocate the necessary space for the history of model estimates
    theta_hist, pi_hist = theta_1[ np.newaxis ].copy( ), pi_1[ np.newaxis ].copy( )
    ll_hist = np.asarray( [ -np.inf ], dtype = np.float )
    ## Set "old" estimates to zero. At this line the current estimates are in fact
    ## the initially provided ones.
    theta_0, pi_0 = np.zeros_like( theta_1 ), np.zeros_like( pi_1 )
    ## Initialize the loop
    status, kiter, rel_theta, rel_pi, ll = -1, 0, np.nan, np.nan, -np.inf
    while kiter < niter :
        ## Dump the current estimators and other information.
        if verbose :
            print( "Iteration %d: avg. log-lik: %.3f, $\\Theta$ div. %.3f, $\\Pi$ div. %.3f" % (
                    kiter, ll / x.shape[ 0 ], rel_theta, rel_pi ) )
            show_data( theta_1 - theta_0 if True else theta_1, n = theta_0.shape[ 0 ],
                    n_col = min( 10, theta_0.shape[ 0 ] ), cmap = plt.cm.hot, interpolation = 'nearest' )
        ## The convergence criterion is the L^∞ norm of relative L^1 errors
        if max( rel_pi, rel_theta ) < rel_eps :
            status = 0
            break ;
        ## Overwrite the initial estimates
        theta_0, pi_0 = theta_1, pi_1
        ## E-step: call the core posterior function to get both the log-likelihood
        ## and the estimate of the conditional distribution.
        z_1, ll_s = __posterior( x, theta_0, pi_0 )
        ## Sum the individual log-likelihoods of observations
        ll = ll_s.sum()
        ## M-step: compute the optimal parameters under the current estimate of the posterior
        theta_1, pi_1 = __learn_clusters( x, z_1 )
        ## Discard the computed estimate of pi if the model is discriminative (conditional likelihood).
        if not full_model :
            pi_1 = pi_0
        ## Record the current estimates to the history
        theta_hist = np.vstack( ( theta_hist, theta_1[np.newaxis] ) )
        pi_hist = np.vstack( ( pi_hist, pi_1[np.newaxis] ) )
        ll_hist = np.append( ll_hist, ll )
        ## Check for bad float numbers
        if not ( np.all( np.isfinite( theta_1 ) ) and np.all( np.isfinite( pi_1 ) ) ) :
            status = -2
            break ;
        ## Check convergence: L^1 relative error. If the relative margin is exactly
        ## zero, then return NaNs. This makes the loop exhaust all iterations, since
        ## any comparison against a NaN returns False.
        rel_theta = np.sum( np.abs( theta_1 - theta_0 ) / ( np.abs( theta_0 ) + rel_eps ) ) if rel_eps > 0 else np.nan
        rel_pi = np.sum( np.abs( pi_1 - pi_0 ) / ( np.abs( pi_0 ) + rel_eps ) ) if rel_eps > 0 else np.nan
        ## Next iteration
        kiter += 1
    return ll_hist, theta_hist, pi_hist, { 'status': status, 'iter': kiter }
def experiment( data, K, maxiter, verbose = True, until_convergence = False, full = False ) :
    """Convenience wrapper around ``em_algorithm``.

    A zero relative tolerance disables the convergence test, forcing the
    algorithm to exhaust all ``maxiter`` iterations.
    """
    tolerance = 1.0e-4 if until_convergence else 0.0
    return em_algorithm( data, K, maxiter, rel_eps = tolerance, verbose = verbose, full = full )
## Tile a stack of flattened images onto a single canvas.
def arrange_flex( images, n_row = 10, n_col = 10, N = 28, M = 28, fill_value = 0 ) :
    """Tile flattened N x M images into one (n_row * N) x (n_col * M) canvas.

    Images are placed left-to-right, top-to-bottom; surplus images are ignored
    and unused cells keep ``fill_value``.
    """
    canvas = np.full( ( n_row * N, n_col * M ), fill_value, dtype = images.dtype )
    ## Never place more images than there are cells (or images).
    n_cells = min( images.shape[ 0 ], n_row * n_col )
    for k in range( n_cells ) :
        ## Cell coordinates of the k-th image, then its pixel offsets.
        row, col = divmod( k, n_col )
        top, left = row * N, col * M
        canvas[ top : top + N, left : left + M ] = images[ k ].reshape( N, M )
    return canvas
def setup_canvas( axis, n_row, n_col, N = 28, M = 28 ) :
    """Decorate *axis* with grid-aligned ticks for an n_row x n_col image tiling.

    Major ticks (unlabelled) mark the seams between N x M tiles; minor ticks
    sit at tile centres and carry 1-based cell coordinates.
    """
    ## Major ticks at the seams between images, with their labels hidden.
    seam_rows = np.arange( 1, n_row + 1 ) * N
    seam_cols = np.arange( 1, n_col + 1 ) * M
    axis.set_yticks( seam_rows, minor = False )
    axis.set_xticks( seam_cols, minor = False )
    axis.set_yticklabels( [ ], minor = False )
    axis.set_xticklabels( [ ], minor = False )
    ## Minor ticks exactly halfway between the seams ...
    centre_rows = ( np.arange( n_row + 1 ) + 0.5 ) * N
    centre_cols = ( np.arange( n_col + 1 ) + 0.5 ) * M
    axis.set_yticks( centre_rows, minor = True )
    axis.set_xticks( centre_cols, minor = True )
    ## ... labelled with the 1-based cell coordinates.
    axis.set_yticklabels( [ "%d" % ( i, ) for i in 1 + np.arange( n_row + 1 ) ], minor = True )
    axis.set_xticklabels( [ "%d" % ( i, ) for i in 1 + np.arange( n_col + 1 ) ], minor = True )
    ## Tick marks point outward so that they do not overlay the images.
    axis.tick_params( axis = 'both', which = 'both', direction = 'out' )
    ## White dashed grid lines along the seams.
    axis.grid( color = 'white', linestyle = '--' )
def show_data( data, n, n_col = 10, transpose = False, **kwargs ) :
    """Display the first *n* flattened images of *data* on a tiled canvas.

    Extra keyword arguments are forwarded to ``imshow`` (cmap, interpolation, ...).
    """
    ## Ceiling division: just enough rows to fit n images in n_col columns.
    n_row = -( -n // n_col )
    if transpose :
        n_row, n_col = n_col, n_row
    ## One subplot sized proportionally to the tiling.
    figure = plt.figure( figsize = ( n_col, n_row ) )
    axis = figure.add_subplot( 111 )
    setup_canvas( axis, n_row, n_col )
    axis.imshow( arrange_flex( data[ :n ], n_col = n_col, n_row = n_row ), **kwargs )
    plt.show( )
def visualize( data, clusters, ll, n_col = 2, plot_ll = True ) :
    """Report the final log-likelihood and plot its dynamics and the templates.

    Parameters
    ----------
    data : (n_samples, n_pixels) observations (used only for averaging).
    clusters : history of parameter estimates; the last entry is displayed.
    ll : history of log-likelihood values.
    n_col : columns in the template plot; 0 suppresses the plot.
    plot_ll : plot the first difference of the average log-likelihood.
    """
    ## Display the result
    print "Final conditional log-likelihood value per observation achieved %f in %d iteration(s)" % (
        ll[-1] / data.shape[ 0 ], len( ll ) )
    ## Plot the first difference of average log-likelihood
    if plot_ll :
        plt.figure( figsize = ( 12, 7 ) )
        ax = plt.subplot(111)
        ax.set_title( r"avg. log-likelihood change between successive iterations (log scale)" )
        ax.plot( np.diff( ll / data.shape[ 0 ] ) )
        # ax.set_ylabel( r"$\Delta_i \frac{1}{n} \sum_{s=1}^n \mathbb{E}_{z_s\sim q_i} \log p(x_s,z_s|\Theta_i)$" )
        ax.set_ylabel( r"$\Delta_i \frac{1}{n} \sum_{s=1}^n \log p(x_s|\Theta_i)$" )
        ax.set_yscale( 'log' )
    ## Plot the final estimates
    ## NOTE(review): plt.cm.spectral was removed in matplotlib 2.2; on modern
    ## versions use plt.cm.nipy_spectral instead.
    if n_col > 0 :
        show_data( clusters[-1], n = clusters.shape[1], n_col = n_col, cmap = plt.cm.spectral, interpolation = 'nearest' )
def animate( theta, ll, pi = None, n_col = 10, n_row = 10, interval = 1, **kwargs ) :
    """Animate the EM parameter history: templates, deltas, and log-likelihood.

    Parameters
    ----------
    theta : (n_iter, K, n_pixels) history of Bernoulli parameter estimates.
    ll : (n_iter,) history of log-likelihood values.
    pi : optional (n_iter, K) history of mixture weights; adds a bar subplot.
    n_col, n_row : tiling of the K templates on the image subplots.
    interval : delay between frames (ms), passed to FuncAnimation.
    kwargs : forwarded to ``imshow`` (cmap, interpolation, ...).
    """
    ## Create a background canvas sized like one iteration's template tiling.
    bg = arrange_flex( np.zeros_like( theta[ 0 ] ), n_col = n_col, n_row = n_row )
    ## Compute log-likelihood differences and sanitize them
    ## (float replaces np.float: the alias was removed in NumPy 1.24).
    ll_diff = np.maximum( np.diff( ll ), np.finfo( float ).eps )
    ll_diff[ ~np.isfinite( ll_diff ) ] = np.nan
    ## Set up the figure, the axes, and the plot elements we want to animate
    fig = plt.figure( figsize = ( 12, 12 ) )
    ## Create the subplots and position them specifically
    if pi is None :
        ax1, ax3, ax2 = fig.add_subplot( 311 ), fig.add_subplot( 312 ), fig.add_subplot( 313 )
    else :
        ax1, ax4 = fig.add_subplot( 411 ), fig.add_subplot( 412 )
        ax3, ax2 = fig.add_subplot( 413 ), fig.add_subplot( 414 )
    ## Initialize different ranges for the image artists
    setup_canvas( ax1, n_row = n_row, n_col = n_col )
    ax1.set_title( r"Current estimate of the mixture components" )
    setup_canvas( ax2, n_row = n_row, n_col = n_col )
    ax2.set_title( r"Change between successive iterations" )
    ## Initialize geometry for the delta log-likelihood plot.
    ax3.set_xlim( -0.1, ll.shape[ 0 ] + 0.1 )
    ax3.set_yscale( 'log' ) #; ax3.set_yticklabels( [ ] )
    ax3.set_title( r"Change between successive iterations of EM (log scale)" )
    ax3.set_ylabel( r"$\Delta_i \sum_{s=1}^n \log p(x_s|\Theta_i)$" )
    ax3.set_ylim( np.nanmin( ll_diff ) * 0.9, np.nanmax( ll_diff ) * 1.1 )
    ax3.grid( )
    ## Setup a plot for the prior probabilities
    if pi is not None :
        classes = 1 + np.arange( len( pi[ 0 ] ) )
        ax4.set_xticks( classes )
        ax4.set_ylim( 0.0, 1.0 )
        ax4.set_title( r"Current estimate of the mixture weights" )
        ba1 = ax4.bar( classes, pi[ 0 ], align = "center" )
    ## Setup the artists
    im1 = ax1.imshow( bg, vmin = +0.0, vmax = +1.0, **kwargs )
    im2 = ax2.imshow( bg, vmin = -1.0, vmax = +1.0, **kwargs )
    line1, = ax3.plot( [ ], linestyle = "-", color = 'blue' )
    ## Animation function. This is called sequentially
    def update( i ) :
        ## Compute the frame to show on the "delta" subplot.
        ## BUG FIX: for i == 0 the original aliased theta[ 0 ] and the in-place
        ## division below silently rescaled the stored history; copy instead.
        frame = theta[ i ] - theta[ i-1 ] if i > 0 else theta[ 0 ].copy( )
        frame /= np.max( np.abs( frame ) )
        ## Draw frames on the image artists
        im1.set_data( arrange_flex( theta[ i ], n_col = n_col, n_row = n_row ) )
        im2.set_data( arrange_flex( frame, n_col = n_col, n_row = n_row ) )
        if i > 0 :
            ## Show history on the line artist
            line1.set_data( np.arange( i ), ll_diff[ :i ] )
        if pi is not None :
            ## Update the bar heights; color them by the direction of change.
            [ b.set_height( h ) for b, h in zip( ba1, pi[ i ] ) ]
            if i > 0 :
                [ b.set_color( 'green' if h > p else 'red' ) for b, h, p in zip( ba1, pi[ i ], pi[ i-1 ] ) ]
            ## Return an iterator of artists in this frame
            return ( im1, im2, line1, ) + ba1
        return im1, im2, line1,
    ## Call the animator.
    return animation.FuncAnimation( fig, update, frames = theta.shape[ 0 ], interval = interval, blit = True )
## Make simple animations of the EM estimatora
## http://jakevdp.github.io/blog/2013/05/12/embedding-matplotlib-animations/
## http://jakevdp.github.io/blog/2012/08/18/matplotlib-animation-tutorial/
from matplotlib import animation
from IPython.display import HTML
from tempfile import NamedTemporaryFile
def embed_video( anim ) :
    """Render *anim* to an H.264 mp4 clip and return an inline HTML5 player.

    The encoded video is cached on the animation object so repeated calls
    do not re-run ffmpeg.
    """
    import base64
    ## HTML5 <video> template; {0} is filled with the base64-encoded mp4 payload.
    VIDEO_TAG = """<video controls autoplay muted loop><source src="data:video/x-m4v;base64,{0}"
type="video/mp4">Your browser does not support the video tag.</video>"""
    ## Close the backing figure so the notebook does not also render a static frame.
    plt.close( anim._fig )
    if not hasattr( anim, '_encoded_video' ) :
        ## Write into the temporary file instead of leaking 'myanim.mp4' into
        ## the working directory; the file is removed when the context exits.
        with NamedTemporaryFile( suffix = '.mp4' ) as f :
            anim.save( f.name, fps = 12, extra_args = [ '-vcodec', 'libx264' ] )
            f.seek( 0 )
            video = f.read( )
        ## base64.b64encode works on Python 2 and 3 alike (str.encode("base64")
        ## was removed in Python 3).
        anim._encoded_video = base64.b64encode( video ).decode( 'ascii' )
    return HTML( VIDEO_TAG.format( anim._encoded_video ) )
if False :
    ## Fetch MNIST dataset from SciKit and create a local copy.
    ## (One-off download; disabled once the compressed .npz snapshot exists.)
    from sklearn.datasets import fetch_mldata
    mnist = fetch_mldata( "MNIST original", data_home = './data/' )
    np.savez_compressed('./data/mnist/mnist_scikit.npz', data = mnist.data, labels = mnist.target )
if False :
    ## The procedure below loads the MNIST data from a comma-separated text file.
    def load_mnist_from_csv( filename ) :
        ## Read the CSV file: one digit per row, first column is the label,
        ## the remaining columns are the pixel intensities.
        data = np.loadtxt( open( filename, "rb" ), dtype = np.short, delimiter = ",", skiprows = 0 )
        ## Peel off the labels (column 0) and return ( pixels, labels ).
        return data[:,1:], data[:,0]
    ## Fetch the data from the provided CSV (!) files and save as a compressed data blob
    data, labels = load_mnist_from_csv( "./data/mnist/mnist_train.csv" )
    np.savez_compressed( './data/mnist/mnist_train.npz', labels = labels, data = data )
    data, labels = load_mnist_from_csv( "./data/mnist/mnist_test.csv" )
    np.savez_compressed( './data/mnist/mnist_test.npz', labels = labels, data = data )
## Load the binarised MNIST training split (pixels thresholded at 127 -> {0, 1}).
assert( os.path.exists( './data/mnist/mnist_train.npz' ) )
with np.load( './data/mnist/mnist_train.npz', 'r' ) as npz :
    mnist_labels, mnist_data = npz[ 'labels' ], np.array( npz[ 'data' ] > 127, np.int )
## Load the matching test split, binarised the same way.
assert( os.path.exists( './data/mnist/mnist_test.npz' ) )
with np.load( './data/mnist/mnist_test.npz', 'r' ) as npz :
    test_labels, test_data = npz[ 'labels' ], np.array( npz[ 'data' ] > 127, np.int )
## Mask: indices of the digits "6" and "9" in the training set.
inx_sixes, inx_nines = np.where( mnist_labels == 6 )[ 0 ], np.where( mnist_labels == 9 )[ 0 ]
## Extract a random sample of 90 images of each digit (without replacement).
sixes = mnist_data[ rand.choice( inx_sixes, 90, replace = False ) ]
nines = mnist_data[ rand.choice( inx_nines, 90, replace = False ) ]
## Show a 3x15 panel of each digit.
show_data( sixes, n = 45, n_col = 15, cmap = plt.cm.gray, interpolation = 'nearest' )
show_data( nines, n = 45, n_col = 15, cmap = plt.cm.gray, interpolation = 'nearest' )
## Fit a two-component mixture to all 6s and 9s with 30 EM iterations and
## visualise the learnt templates and the log-likelihood trace.
data = mnist_data[ np.append( inx_sixes, inx_nines ) ]
clusters, ll = experiment( data, 2, 30 )
visualize( data, clusters, ll )
## Sanity check: display 100 random training digits.
indices = np.arange( mnist_data.shape[ 0 ] )
rand.shuffle( indices )
show_data( mnist_data[ indices[:100] ] , n = 100, n_col = 10, cmap = plt.cm.gray, interpolation = 'nearest' )
## Build a balanced subsample: 200 random examples of every digit 0-9.
sub_sample = np.concatenate( tuple( [ rand.choice( np.where( mnist_labels == i )[ 0 ], size = 200 ) for i in range( 10 ) ] ) )
train_data, train_labels = mnist_data[ sub_sample ], mnist_labels[ sub_sample ]
# train_data, train_labels = mnist_data, mnist_labels
## Fit mixtures with a growing number of components K; larger K lets the
## model dedicate several templates to distinct writing styles of one digit.
clusters_10, ll_10 = experiment( train_data, 10, 50 )
visualize( train_data, clusters_10, ll_10, n_col = 10, plot_ll = True )
clusters_15, ll_15 = experiment( train_data, 15, 50, verbose = False, until_convergence = False )
clusters_20, ll_20 = experiment( train_data, 20, 50, verbose = False, until_convergence = False )
clusters_30, ll_30 = experiment( train_data, 30, 50, verbose = False, until_convergence = False )
visualize( train_data, clusters_15, ll_15, n_col = 10, plot_ll = False )
visualize( train_data, clusters_20, ll_20, n_col = 10, plot_ll = False )
visualize( train_data, clusters_30, ll_30, n_col = 10, plot_ll = False )
## K = 60: run EM until convergence (up to 500 iterations) and animate the fit.
clusters_60, ll_60 = experiment( train_data, 60, 500, verbose = False, until_convergence = True )
visualize( train_data, clusters_60, ll_60, n_col = 15 )
## If you want to see this animation ensure that ffmpeg is installed and uncomment the following lines.
anim_60 = animate( clusters_60, ll_60, n_col = 15, n_row = 4,
                   interval = 1, cmap = plt.cm.hot, interpolation = 'nearest' )
embed_video( anim_60 )
## Model selection: test mixtures with K from 12 up to 42 in steps of 3
## and record the final log-likelihood of each fit.
classes = 12 + np.arange( 11, dtype = np.int ) * 3
ll_hist = np.full( len( classes ), -np.inf, dtype = np.float )
## Store the final parameter estimate of every fit.
parameter_hist = list( )
for i, K in enumerate( classes ) :
    ## Run the experiment: 50 EM iterations, no convergence check.
    c, l = experiment( train_data, K, 50, verbose = False, until_convergence = False )
    ll_hist[ i ] = l[ -1 ]
    parameter_hist.append( c[ -1 ] )
    ## Visualize the final parameters
    show_data( c[-1], n = K, n_col = 13, cmap = plt.cm.hot, interpolation = 'nearest' )
## Report the K with the highest final log-likelihood (NB: the training
## likelihood generally grows with K, so this favours larger models).
print classes[ np.argmax( ll_hist ) ]
# clusters = parameter_hist[ np.argmax( ll_hist ) ] * 0.999 + 0.0005
clusters = clusters_60[-1]
## Compute the posterior component probabilities, and use max-aposteriori
## for the best class selection.
c_s, q_sk, ll_s = classify( train_data, clusters )
## Strategy 1 -- majority vote: label each template with the most frequent
## true digit among the training images assigned to it.
template_x_label_maj_60 = np.full( clusters.shape[ 0 ], -1, np.int )
for t in range( clusters.shape[ 0 ] ) :
    l, f = np.unique( train_labels[ c_s == t ], return_counts = True)
    if len( l ) > 0 :
        ## This is too blunt an approach: it does not guarantee surjectivity of the mapping.
        template_x_label_maj_60[ t ] = l[ np.argmax( f ) ]
## Strategy 2 -- likelihood score: there are 10 labels and K templates.
label_cluster_score = np.full( ( clusters.shape[ 0 ], 10 ), -np.inf, np.float )
## Loop over each template
for t in range( clusters.shape[ 0 ] ) :
    ## The selected templates are chosen according to max-aposteriori rule.
    inx = np.where( c_s == t )[ 0 ]
    ## Get the assigned labels and their frequencies
    actual_labels = train_labels[ inx ]
    l, f = np.unique( actual_labels, return_counts = True )
    ## For each template and each associated label in the training set the
    ## score is the average of the top-5 highest log-likelihoods.
    label_cluster_score[ t, l ] = [ np.average( sorted(
        ll_s[ inx[ actual_labels == a ] ].flatten( ), reverse = True )[ : 5 ] ) for a in l ]
## For each template choose the label with the highest likelihood score.
template_x_label_lik_60 = np.argmax( label_cluster_score, axis = 1 )
show_data( clusters, clusters.shape[ 0 ], 10,
           cmap = plt.cm.spectral, interpolation = 'nearest' )
## Highlight the templates on which the two labelling strategies disagree.
mask = np.asarray( template_x_label_maj_60 != template_x_label_lik_60, dtype = np.float ).reshape( (-1,1) )
show_data( clusters * mask, clusters.shape[ 0 ], 10,
           cmap = plt.cm.spectral, interpolation = 'nearest' )
print "\nLikelihood based: ", template_x_label_lik_60[ mask[:,0] > 0 ]
print "Majority bassed: ", template_x_label_maj_60[ mask[:,0] > 0 ]
## Display the templates sorted by their assigned digit, for both strategies.
show_data( clusters[ np.argsort( template_x_label_lik_60 ) ], clusters.shape[ 0 ],
           10, cmap = plt.cm.spectral, interpolation = 'nearest' )
show_data( clusters[ np.argsort( template_x_label_maj_60 ) ], clusters.shape[ 0 ],
           10, cmap = plt.cm.spectral, interpolation = 'nearest' )
## Run the classifier on the test data
c_s_60, q_sk, ll_s = classify( test_data, clusters )
## Show a sample of images and their templates
sample = np.random.permutation( test_data.shape[ 0 ] )[:64]
## Stack each image and its best-matching template atop one another
display_stack = np.empty( ( 2 * len( sample ), test_data.shape[ 1 ] ), dtype = np.float )
display_stack[0::2] = test_data[ sample ] * q_sk[ sample, c_s_60[ sample ], np.newaxis ]
display_stack[1::2] = clusters[ c_s_60[ sample ] ]
## Display
show_data( display_stack, n = display_stack.shape[ 0 ], n_col = 16,
           transpose = False, cmap = plt.cm.spectral, interpolation = 'nearest' )
## Test-set accuracy of the two template-to-digit mappings.
print "Accuracy of likelihood based labelling: %.2f" % (
    100 * np.average( template_x_label_lik_60[ c_s_60 ] == test_labels ), )
print "Accuracy of simple majority labelling: %.2f" % (
    100 * np.average( template_x_label_maj_60[ c_s_60 ] == test_labels ), )
## Now display the test observations, which the model could not classify at all.
bad_tests = np.where( np.isinf( ll_s ) )[ 0 ]
show_data( test_data[ bad_tests ], n = max( len( bad_tests ), 10 ), n_col = 15, cmap = plt.cm.gray, interpolation = 'nearest' )
# print q_sk[ bad_tests ]
## Repeat the template-labelling evaluation for K = 30.
## NOTE(review): this block is copy-pasted four times (K = 30, 20, 15, 10)
## and could be factored into a single helper function.
clusters = clusters_30[-1]
c_s, q_sk, ll_s = classify( train_data, clusters )
## Majority-vote labelling of the templates.
template_x_label_maj_30 = np.full( clusters.shape[ 0 ], -1, np.int )
for t in range( clusters.shape[ 0 ] ) :
    l, f = np.unique( train_labels[ c_s == t ], return_counts = True)
    if len( l ) > 0 :
        template_x_label_maj_30[ t ] = l[ np.argmax( f ) ]
## Likelihood-score labelling: average of the top-5 log-likelihoods per label.
label_cluster_score_30 = np.full( ( clusters.shape[ 0 ], 10 ), -np.inf, np.float )
for t in range( clusters.shape[ 0 ] ) :
    inx = np.where( c_s == t )[ 0 ]
    actual_labels = train_labels[ inx ]
    l, f = np.unique( actual_labels, return_counts = True )
    label_cluster_score_30[ t, l ] = [ np.average( sorted(
        ll_s[ inx[ actual_labels == a ] ].flatten( ), reverse = True )[ : 5 ] ) for a in l ]
template_x_label_lik_30 = np.argmax( label_cluster_score_30, axis = 1 )
show_data( clusters[ np.argsort( template_x_label_lik_30 ) ], clusters.shape[ 0 ],
           10, cmap = plt.cm.spectral, interpolation = 'nearest' )
print template_x_label_lik_30[ np.argsort( template_x_label_lik_30 ) ].reshape((3,-1))
## Test-set accuracy for K = 30.
c_s_30, q_sk, ll_s = classify( test_data, clusters )
print "Accuracy of likelihood based labelling: %.2f" % (
    100 * np.average( template_x_label_lik_30[ c_s_30 ] == test_labels ), )
print "Accuracy of simple majority labelling: %.2f" % (
    100 * np.average( template_x_label_maj_30[ c_s_30 ] == test_labels ), )
## Same procedure for K = 20.
clusters = clusters_20[-1]
c_s, q_sk, ll_s = classify( train_data, clusters )
template_x_label_maj_20 = np.full( clusters.shape[ 0 ], -1, np.int )
for t in range( clusters.shape[ 0 ] ) :
    l, f = np.unique( train_labels[ c_s == t ], return_counts = True)
    if len( l ) > 0 :
        template_x_label_maj_20[ t ] = l[ np.argmax( f ) ]
label_cluster_score_20 = np.full( ( clusters.shape[ 0 ], 10 ), -np.inf, np.float )
for t in range( clusters.shape[ 0 ] ) :
    inx = np.where( c_s == t )[ 0 ]
    actual_labels = train_labels[ inx ]
    l, f = np.unique( actual_labels, return_counts = True )
    label_cluster_score_20[ t, l ] = [ np.average( sorted(
        ll_s[ inx[ actual_labels == a ] ].flatten( ), reverse = True )[ : 5 ] ) for a in l ]
template_x_label_lik_20 = np.argmax( label_cluster_score_20, axis = 1 )
show_data( clusters[ np.argsort( template_x_label_lik_20 ) ], clusters.shape[ 0 ],
           10, cmap = plt.cm.spectral, interpolation = 'nearest' )
print template_x_label_lik_20[ np.argsort( template_x_label_lik_20 ) ].reshape((2,-1))
c_s_20, q_sk, ll_s = classify( test_data, clusters )
print "Accuracy of likelihood based labelling: %.2f" % (
    100 * np.average( template_x_label_lik_20[ c_s_20 ] == test_labels ), )
print "Accuracy of simple majority labelling: %.2f" % (
    100 * np.average( template_x_label_maj_20[ c_s_20 ] == test_labels ), )
## Same procedure for K = 15.
clusters = clusters_15[-1]
c_s, q_sk, ll_s = classify( train_data, clusters )
template_x_label_maj_15 = np.full( clusters.shape[ 0 ], -1, np.int )
for t in range( clusters.shape[ 0 ] ) :
    l, f = np.unique( train_labels[ c_s == t ], return_counts = True)
    if len( l ) > 0 :
        template_x_label_maj_15[ t ] = l[ np.argmax( f ) ]
label_cluster_score_15 = np.full( ( clusters.shape[ 0 ], 10 ), -np.inf, np.float )
for t in range( clusters.shape[ 0 ] ) :
    inx = np.where( c_s == t )[ 0 ]
    actual_labels = train_labels[ inx ]
    l, f = np.unique( actual_labels, return_counts = True )
    label_cluster_score_15[ t, l ] = [ np.average( sorted(
        ll_s[ inx[ actual_labels == a ] ].flatten( ), reverse = True )[ : 5 ] ) for a in l ]
template_x_label_lik_15 = np.argmax( label_cluster_score_15, axis = 1 )
show_data( clusters[ np.argsort( template_x_label_lik_15 ) ], clusters.shape[ 0 ],
           15, cmap = plt.cm.spectral, interpolation = 'nearest' )
print template_x_label_lik_15[ np.argsort( template_x_label_lik_15 ) ].reshape((1,-1))
c_s_15, q_sk, ll_s = classify( test_data, clusters )
print "Accuracy of likelihood based labelling: %.2f" % (
    100 * np.average( template_x_label_lik_15[ c_s_15 ] == test_labels ), )
print "Accuracy of simple majority labelling: %.2f" % (
    100 * np.average( template_x_label_maj_15[ c_s_15 ] == test_labels ), )
## Same procedure for K = 10.
clusters = clusters_10[-1]
c_s, q_sk, ll_s = classify( train_data, clusters )
template_x_label_maj_10 = np.full( clusters.shape[ 0 ], -1, np.int )
for t in range( clusters.shape[ 0 ] ) :
    l, f = np.unique( train_labels[ c_s == t ], return_counts = True)
    if len( l ) > 0 :
        template_x_label_maj_10[ t ] = l[ np.argmax( f ) ]
label_cluster_score_10 = np.full( ( clusters.shape[ 0 ], 10 ), -np.inf, np.float )
for t in range( clusters.shape[ 0 ] ) :
    inx = np.where( c_s == t )[ 0 ]
    actual_labels = train_labels[ inx ]
    l, f = np.unique( actual_labels, return_counts = True )
    label_cluster_score_10[ t, l ] = [ np.average( sorted(
        ll_s[ inx[ actual_labels == a ] ].flatten( ), reverse = True )[ : 5 ] ) for a in l ]
template_x_label_lik_10 = np.argmax( label_cluster_score_10, axis = 1 )
show_data( clusters[ np.argsort( template_x_label_lik_10 ) ], clusters.shape[ 0 ],
           10, cmap = plt.cm.spectral, interpolation = 'nearest' )
print template_x_label_lik_10[ np.argsort( template_x_label_lik_10 ) ].reshape((1,-1))
c_s_10, q_sk, ll_s = classify( test_data, clusters )
print "Accuracy of likelihood based labelling: %.2f" % (
    100 * np.average( template_x_label_lik_10[ c_s_10 ] == test_labels ), )
print "Accuracy of simple majority labelling: %.2f" % (
    100 * np.average( template_x_label_maj_10[ c_s_10 ] == test_labels ), )
## Summary: test-set accuracy of the likelihood-based labelling per model size.
print "Model with K = 10: %.2f" % ( 100 * np.average( template_x_label_lik_10[ c_s_10 ] == test_labels ), )
print "Model with K = 15: %.2f" % ( 100 * np.average( template_x_label_lik_15[ c_s_15 ] == test_labels ), )
print "Model with K = 20: %.2f" % ( 100 * np.average( template_x_label_lik_20[ c_s_20 ] == test_labels ), )
print "Model with K = 30: %.2f" % ( 100 * np.average( template_x_label_lik_30[ c_s_30 ] == test_labels ), )
print "Model with K = 60: %.2f" % ( 100 * np.average( template_x_label_lik_60[ c_s_60 ] == test_labels ), )
## Fit a K = 30 mixture on `data` with up to 1000 iterations (the extra flags
## presumably enable convergence checking and mixture-weight tracking --
## TODO confirm against experiment()'s signature), then animate the fit.
( clusters_full, pi_full ), ll_full = experiment( data, 30, 1000, False, True, True )
anim_full = animate( clusters_full, ll_full, pi = pi_full, n_col = 15, n_row = 2, interval = 1, cmap = plt.cm.hot, interpolation = 'nearest' )
embed_video( anim_full )
## Generative sampling baseline: kernel density estimation in PCA space.
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
## Project the images onto the first 50 principal components.
## NOTE(review): X_train is not defined anywhere in this notebook chunk --
## presumably it should be train_data (or mnist_data); verify before running.
pca = PCA( n_components = 50 )
X_train_pca = pca.fit_transform( X_train )
## Cross-validated grid search for the KDE bandwidth.
params = { 'bandwidth' : np.logspace( -1, 1, 20 ) }
grid = GridSearchCV( KernelDensity( ), params )
grid.fit( X_train_pca )
print("best bandwidth: {0}".format( grid.best_estimator_.bandwidth ) )
params
## Draw 100 new samples from the fitted density and map them back to pixel space.
kde = grid.best_estimator_
new_data = kde.sample( 100 )
new_data = pca.inverse_transform( new_data )
print new_data.shape
## Display the synthetic digits.
plt.figure( figsize = ( 9, 9 ) )
plt.imshow( arrange_flex( new_data ), cmap = plt.cm.gray, interpolation = 'nearest' )
plt.show( )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <table class="tfo-notebook-buttons" align="left">
Step2: Vectorize an example sentence
Step3: Create a vocabulary to save mappings from tokens to integer indices
Step4: Create an inverse vocabulary to save mappings from integer indices to tokens
Step5: Vectorize your sentence
Step6: Generate skip-grams from one sentence
Step7: Print a few positive skip-grams
Step8: Negative sampling for one skip-gram
Step9: Construct one training example
Step10: Check out the context and the corresponding labels for the target word from the skip-gram example above
Step11: A tuple of (target, context, label) tensors constitutes one training example for training your skip-gram negative sampling word2vec model. Notice that the target is of shape (1,) while the context and label are of shape (1+num_ns,)
Step12: Summary
Step13: sampling_table[i] denotes the probability of sampling the i-th most common word in a dataset. The function assumes a Zipf's distribution of the word frequencies for sampling.
Step14: Prepare training data for word2vec
Step15: Read the text from the file and print the first few lines
Step16: Use the non empty lines to construct a tf.data.TextLineDataset object for the next steps
Step17: Vectorize sentences from the corpus
Step18: Call TextVectorization.adapt on the text dataset to create vocabulary.
Step19: Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with TextVectorization.get_vocabulary. This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
Step20: The vectorize_layer can now be used to generate vectors for each element in the text_ds (a tf.data.Dataset). Apply Dataset.batch, Dataset.prefetch, Dataset.map, and Dataset.unbatch.
Step21: Obtain sequences from the dataset
Step22: Inspect a few examples from sequences
Step23: Generate training examples from sequences
Step24: Configure the dataset for performance
Step25: Apply Dataset.cache and Dataset.prefetch to improve performance
Step26: Model and training
Step27: Define loss function and compile model
Step28: Also define a callback to log training statistics for TensorBoard
Step29: Train the model on the dataset for some number of epochs
Step30: TensorBoard now shows the word2vec model's accuracy and loss
Step31: <!-- <img class="tfo-display-only-on-site" src="images/word2vec_tensorboard.png"/> -->
Step32: Create and save the vectors and metadata files
Step33: Download the vectors.tsv and metadata.tsv to analyze the obtained embeddings in the Embedding Projector
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import re
import string
import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
# Load the TensorBoard notebook extension
%load_ext tensorboard
SEED = 42
AUTOTUNE = tf.data.AUTOTUNE
# Toy example: tokenize a single sentence.
sentence = "The wide road shimmered in the hot sun"
tokens = list(sentence.lower().split())
print(len(tokens))
# Assign every distinct token a unique integer id; id 0 is reserved
# for the padding token.
vocab, index = {}, 1  # start indexing from 1
vocab['<pad>'] = 0  # add a padding token
for token in tokens:
    if token in vocab:
        continue  # already seen: keep its existing id
    vocab[token] = index
    index += 1
vocab_size = len(vocab)
print(vocab)
# Reverse mapping: integer id -> token.
inverse_vocab = {idx: tok for tok, idx in vocab.items()}
print(inverse_vocab)
# Encode the sentence as a sequence of token ids.
example_sequence = [vocab[tok] for tok in tokens]
print(example_sequence)
## Positive skip-gram pairs: every (target, context) pair of token ids whose
## positions are at most `window_size` apart. negative_samples=0 defers
## negative sampling to a separate step below.
window_size = 2
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
    example_sequence,
    vocabulary_size=vocab_size,
    window_size=window_size,
    negative_samples=0)
print(len(positive_skip_grams))
## Print a few positive pairs, decoded back into words.
for target, context in positive_skip_grams[:5]:
  print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class, # class that should be sampled as 'positive'
num_true=1, # each positive skip-gram has 1 positive context class
num_sampled=num_ns, # number of negative context words to sample
unique=True, # all the negative samples should be unique
range_max=vocab_size, # pick index of the samples from [0, vocab_size]
seed=SEED, # seed for reproducibility
name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
# Add a dimension so you can use concatenation (in the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concatenate a positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label the first context word as `1` (positive) followed by `num_ns` `0`s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape the target to shape `(1,)` and context and label to `(num_ns+1,)`.
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
print("target :", target)
print("context :", context)
print("label :", label)
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
  """Build (target, context, label) training triples for word2vec.

  Args:
    sequences: iterable of int-encoded sentences (lists of token ids).
    window_size: max distance between target and context tokens.
    num_ns: number of negative samples per positive context word.
    vocab_size: vocabulary size (upper bound for sampled word ids).
    seed: random seed. NOTE(review): the sampler below actually uses the
      module-level SEED constant, not this argument -- confirm intent.

  Returns:
    Three parallel lists: targets (ints), contexts (tensors of shape
    (num_ns + 1, 1)), labels (tensors with one 1 followed by num_ns 0s).
  """
  # Elements of each training example are appended to these lists.
  targets, contexts, labels = [], [], []
  # Build the sampling table for `vocab_size` tokens.
  sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
  # Iterate over all sequences (sentences) in the dataset.
  for sequence in tqdm.tqdm(sequences):
    # Generate positive skip-gram pairs for a sequence (sentence);
    # frequent words are subsampled according to `sampling_table`.
    positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
          sequence,
          vocabulary_size=vocab_size,
          sampling_table=sampling_table,
          window_size=window_size,
          negative_samples=0)
    # Iterate over each positive skip-gram pair to produce training examples
    # with a positive context word and negative samples.
    for target_word, context_word in positive_skip_grams:
      context_class = tf.expand_dims(
          tf.constant([context_word], dtype="int64"), 1)
      negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
          true_classes=context_class,
          num_true=1,
          num_sampled=num_ns,
          unique=True,
          range_max=vocab_size,
          seed=SEED,
          name="negative_sampling")
      # Build context and label vectors (for one target word)
      negative_sampling_candidates = tf.expand_dims(
          negative_sampling_candidates, 1)
      context = tf.concat([context_class, negative_sampling_candidates], 0)
      label = tf.constant([1] + [0]*num_ns, dtype="int64")
      # Append each element from the training example to global lists.
      targets.append(target_word)
      contexts.append(context)
      labels.append(label)
  return targets, contexts, labels
## Download the Shakespeare corpus (one-time; cached by Keras).
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
## Read the text and show the first few lines.
with open(path_to_file) as f:
  lines = f.read().splitlines()
for line in lines[:20]:
  print(line)
## Dataset of non-empty lines (tf.strings.length(x) == 0 casts to False).
text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
# Text normalisation used by the TextVectorization layer: lowercase
# everything and strip all ASCII punctuation characters.
def custom_standardization(input_data):
  """Lowercase `input_data` and remove punctuation."""
  return tf.strings.regex_replace(
      tf.strings.lower(input_data),
      '[%s]' % re.escape(string.punctuation), '')
# Define the vocabulary size and the number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the `TextVectorization` layer to normalize, split, and map strings to
# integers. Set the `output_sequence_length` length to pad all samples to the
# same length.
vectorize_layer = layers.TextVectorization(
    standardize=custom_standardization,
    max_tokens=vocab_size,
    output_mode='int',
    output_sequence_length=sequence_length)
## Build the vocabulary from the corpus (batched for throughput).
vectorize_layer.adapt(text_ds.batch(1024))
# Save the created vocabulary for reference; tokens are sorted by
# descending corpus frequency.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
# Vectorize the data in text_ds: map every line to a padded id sequence.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
## Materialise the dataset as a list of numpy id sequences.
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
## Inspect a few encoded sentences, decoded back into tokens.
for seq in sequences[:5]:
  print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
## Generate the full training set from the corpus.
targets, contexts, labels = generate_training_data(
    sequences=sequences,
    window_size=2,
    num_ns=4,
    vocab_size=vocab_size,
    seed=SEED)
## Convert to dense numpy arrays; drop the trailing singleton dim of contexts.
targets = np.array(targets)
contexts = np.array(contexts)[:,:,0]
labels = np.array(labels)
print('\n')
print(f"targets.shape: {targets.shape}")
print(f"contexts.shape: {contexts.shape}")
print(f"labels.shape: {labels.shape}")
## Build a shuffled, batched tf.data pipeline over ((target, context), label).
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
## Cache in memory and prefetch to overlap training with data preparation.
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
class Word2Vec(tf.keras.Model):
  """Skip-gram word2vec model with negative sampling.

  Two embedding tables are learnt: one for a word appearing as the target
  and one for a word appearing as (positive or negative) context. Each
  (target, context) pair is scored by the dot product of the two embeddings.

  NOTE(review): the constructor reads the module-level `num_ns` global for
  the context input length rather than taking it as a parameter.
  """
  def __init__(self, vocab_size, embedding_dim):
    super(Word2Vec, self).__init__()
    ## Target-word embedding; named so its weights can be exported later
    ## via get_layer('w2v_embedding').
    self.target_embedding = layers.Embedding(vocab_size,
                                      embedding_dim,
                                      input_length=1,
                                      name="w2v_embedding")
    ## Context-word embedding: one positive + num_ns negative words per example.
    self.context_embedding = layers.Embedding(vocab_size,
                                       embedding_dim,
                                       input_length=num_ns+1)

  def call(self, pair):
    target, context = pair
    # target: (batch, dummy?)  # The dummy axis doesn't exist in TF2.7+
    # context: (batch, context)
    if len(target.shape) == 2:
      target = tf.squeeze(target, axis=1)
    # target: (batch,)
    word_emb = self.target_embedding(target)
    # word_emb: (batch, embed)
    context_emb = self.context_embedding(context)
    # context_emb: (batch, context, embed)
    ## Per-example dot products between the target embedding and every
    ## context embedding -> one logit per candidate context word.
    dots = tf.einsum('be,bce->bc', word_emb, context_emb)
    # dots: (batch, context)
    return dots
## Instantiate and compile the model. The (1 + num_ns) dot-product scores
## are treated as logits over "which candidate is the true context".
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
                 loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
                 metrics=['accuracy'])
## Log training statistics for TensorBoard.
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
#docs_infra: no_execute
%tensorboard --logdir logs
## Export the learnt target embeddings plus the vocabulary in the
## Embedding Projector format (vectors.tsv + metadata.tsv).
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
  if index == 0:
    continue  # skip 0, it's padding.
  vec = weights[index]
  out_v.write('\t'.join([str(x) for x in vec]) + "\n")
  out_m.write(word + "\n")
out_v.close()
out_m.close()
## In Colab, offer the two files for download; elsewhere this silently no-ops.
try:
  from google.colab import files
  files.download('vectors.tsv')
  files.download('metadata.tsv')
except Exception:
  pass
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Base features
Step2: Pitch Features
Step3: Feature Transforms
Step4: The above tells the module where to look for base features.
Step5: Output
|
<ASSISTANT_TASK:>
Python Code:
## Build a dataset dictionary from the Eurovision audio directory.
audio_dir = '../Cogitch/Audio/Eurovision/'
euro_dict = utils.dataset_from_dir(audio_dir)
## Location of the precomputed feature files.
data_dir = '../Cogitch/Data/Eurovision/'
# base_features.compute_and_write(audio_dir, data_dir)
## Tell the pitch-features module where the melody and chroma (HPCP) data live.
pitch_features.melody_dir = data_dir + 'melody/'
pitch_features.chroma_dir = data_dir + 'hpcp/'
## Pitch features as name -> (computation function, keyword arguments);
## the *_int variants are computed with intervals=True.
features = {'pitchhist3': (pitch_features.get_pitchhist3, {}),
            'pitchhist3_int': (pitch_features.get_pitchhist3, {'intervals': True}),
            'chromahist3': (pitch_features.get_chromahist3, {}),
            'chromahist3_int': (pitch_features.get_chromahist3, {'intervals': True}),
            'harmonisation': (pitch_features.get_harmonisation, {}),
            'harmonisation_int': (pitch_features.get_harmonisation, {'intervals': True}) }
# pitch_features.compute_and_write(data_dir, features=features)
## Point the feature-transform module at the same data directory so it can
## look up the base features computed above.
feature_transforms.data_dir = data_dir
## Dotted feature-transform specifications: base feature followed by a chain
## of transforms (e.g. '...corpus.pdf.rank.logit').
features = [
    'harmonisation_int.corpus.information',
    'harmonisation_int.corpus.tau',
    'harmonisation_int.song.information',
    'harmonisation_int.song.tau',
    'harmonisation.normentropy.minlog',
    'harmonisation.normentropy.minlog.corpus.pdf.rank.logit',
    'harmonisation.normentropy.minlog.song.pdf.rank.logit',
    'chromahist3_int.corpus.information',
    'chromahist3_int.corpus.tau',
    'chromahist3_int.song.information',
    'chromahist3_int.song.tau',
    'chromahist3.normentropy.minlog',
    'chromahist3.normentropy.minlog.corpus.pdf.rank.logit',
    'chromahist3.normentropy.minlog.song.pdf.rank.logit',
    'loudness.mean',
    'loudness.mean.corpus.pdf.rank.logit',
    'loudness.mean.song.pdf.rank.logit',
    'loudness.std',
    'loudness.std.corpus.pdf.rank.logit',
    'loudness.std.song.pdf.rank.logit',
    'pitchhist3_int.corpus.information',
    'pitchhist3_int.corpus.tau',
    'pitchhist3_int.song.information',
    'pitchhist3_int.song.tau',
    'pitchhist3.normentropy.minlog',
    'pitchhist3.normentropy.minlog.corpus.pdf.rank.logit',
    'pitchhist3.normentropy.minlog.song.pdf.rank.logit',
    'mfcc.mean.corpus.indeppdf.rank.logit',
    'mfcc.mean.song.indeppdf.rank.logit',
    'mfcc.totvar.log',
    'mfcc.totvar.log.corpus.pdf.rank.logit',
    'mfcc.totvar.log.song.pdf.rank.logit',
    'melody.mean',
    'melody.mean.corpus.pdf.rank.logit',
    'melody.mean.song.pdf.rank.logit',
    'melody.std.log',
    'melody.std.log.corpus.pdf.rank.logit',
    'melody.std.log.song.pdf.rank.logit',
    'roughness.mean.log',
    'roughness.mean.log.corpus.pdf.rank.logit',
    'roughness.mean.log.song.pdf.rank.logit',
    'sharpness.mean',
    'sharpness.mean.corpus.pdf.rank.logit',
    'sharpness.mean.song.pdf.rank.logit']
## Compute all transformed features for the dataset and export as CSV.
data = feature_transforms.compute(euro_dict, features)
# data.hist(figsize=(28,21));
data.to_csv('euro_features.csv', index=None)
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Below I'm plotting an example image from the MNIST dataset. These are 28x28 grayscale images of handwritten digits.
Step2: We'll train an autoencoder with these images by flattening them into 784 length vectors. The images from this dataset are already normalized such that the values are between 0 and 1. Let's start by building basically the simplest autoencoder with a single ReLU hidden layer. This layer will be used as the compressed representation. Then, the encoder is the input layer and the hidden layer. The decoder is the hidden layer and the output layer. Since the images are normalized between 0 and 1, we need to use a sigmoid activation on the output layer to get values matching the input.
Step3: Training
Step4: Here I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss.
Step5: Checking out the results
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
# Size of the encoding layer (the hidden layer)
encoding_dim = 32 # feel free to change this value
image_size = mnist.train.images.shape[1]
# Input and target placeholders
inputs_ = tf.placeholder(tf.float32, (None, image_size), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, image_size), name='targets')
# Output of hidden layer, single fully connected layer here with ReLU activation
encoded = tf.layers.dense(inputs_, encoding_dim, activation=tf.nn.relu)
# Output layer logits, fully connected layer with no activation
logits = tf.layers.dense(inputs_, image_size, activation=None)
# Sigmoid output from logits
decoded = tf.nn.sigmoid(logits, name='output')
# Sigmoid cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
# Mean of the loss
cost = tf.reduce_mean(loss)
# Adam optimizer
learning_rate = 0.001
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Create the session
sess = tf.Session()
epochs = 20
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
feed = {inputs_: batch[0], targets_: batch[0]}
batch_cost, _ = sess.run([cost, opt], feed_dict=feed)
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed, compressed = sess.run([decoded, encoded], feed_dict={inputs_: in_imgs})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: VTK Viewer
|
<ASSISTANT_TASK:>
Python Code:
height = 70.
width = 50.
thickness = 30.
pnt1 = [-width/2., 0., 0.]
pnt2 = [-width/2., -thickness/4., 0.]
pnt3 = [0., -thickness/2., 0.]
pnt4 = [width/2., -thickness/4., 0.]
pnt5 = [width/2., 0., 0.]
edge1 = Edge().createLine(start=pnt1, end=pnt2)
edge2 = Edge().createArc3P(start=pnt2, end=pnt4, pnt=pnt3)
edge3 = Edge().createLine(start=pnt4, end=pnt5)
halfProfile = Wire([edge1, edge2, edge3])
mirrorPlane = Plane(origin=[0,0,0], xaxis=[1,0,0], yaxis=[0,0,1])
mirrorProfile = halfProfile.mirror(mirrorPlane, copy=True)
allEdges = list(EdgeIterator(halfProfile)) + list(EdgeIterator(mirrorProfile))
fullProfile = Wire().createWire(allEdges)
bottomFace = Face().createFace(fullProfile)
body = Solid().extrude(bottomFace, (0, 0, 0), (0, 0, height))
body.fillet(thickness/12.)
neckHeight = height/10
neckRadius = thickness/4
neck = Solid().createCylinder([0,0,0], [0,0,neckHeight], radius=neckRadius)
neck.translate([0, 0, height])
body.fuse(neck)
zMax = -1
neckTopFace = None
for f in FaceIterator(body):
[x, y , z] = f.centreOfMass()
if z >= zMax:
neckTopFace = f
zMax = z
body.shell(thickness/50., [neckTopFace], tolerance=1E-3)
t_thick = neckHeight/5
t_height = neckHeight - t_thick
t_radius = neckRadius + t_thick/4
t_pitch = t_height/2
t_angle = 0
# Note the following thread geometry is not correct. The profile
# is wrong and there is a twist added to the profile. But it's
# kind of close and good enough for this example.
threadHelix = Edge().createHelix(pitch=t_pitch,
height=t_height,
radius=t_radius,
angle = t_angle)
threadFace = Face().createPolygonal([[0, 0, t_thick/2],
[t_thick, .0, 0],
[0, 0, -t_thick/2]])
threadFace.translate([t_radius, 0, 0])
thread = Solid().pipe(threadFace, threadHelix)
thread.translate([0, 0, height])
body.fuse(thread)
actor = body.toVtkActor()
try:
a = get_QApplication([])
except:
pass
vtkWin = SimpleVtkViewer()
vtkWin.add_actor(actor)
# If the VTK window is blank/white, click on the window and hit 'r' to zoom to fit.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <table border="1">
Step2: <table border="1">
Step3: <table border="1">
Step4: <table border="1">
Step5: 2.1 Central Hidroelectrica de Bombeo v.1
Step6: 2.1 Central Hidroelectrica de Bombeo v.2
Step7: 2.3 Problema del Año Bisiesto
Step8: 2.3 Problema del Año Bisiesto
|
<ASSISTANT_TASK:>
Python Code:
def mi_funcion(x, y, z):
    """Return x*y*z plus the weighted sum x/2 + y/4 + z/8."""
    producto = x * y * z
    ponderado = x / 2 + y / 4 + z / 8
    return producto + ponderado
a = 1.0
b = 2.0
a = mi_funcion(a, b, 3.0)
print a
def mi_funcion(x,y,z):
    # Same definition as the cell above; repeated so the notebook can
    # contrast calling it with integer arguments instead of floats
    # (the surrounding cells use Python 2 `print` statements, where
    # `/` on ints is floor division).
    a = x * y * z          # product of the three arguments
    b = x/2 + y/4 + z/8    # weighted sum -- division semantics depend on argument type
    c = a + b
    return c
a = 1
b = 2
a = mi_funcion(a, b, 3)
print a
def f(x, y):
    """Return int(x)/4 + float(x)/4 + len(y)."""
    total = int(x) / 4
    total += float(x) / 4
    return total + len(y)
def g(a, b):
    """Return ``a`` when both arguments are equal, otherwise their product."""
    return a if a == b else a * b
a = "dos"
b = 2
c = f(2.0, g(a,b))
def f1(a, b):
    """Return the difference ``a - b``."""
    diferencia = a - b
    return diferencia
def f2(b, a):
    """Call ``f1`` with this function's parameters swapped back.

    Demonstrates that argument binding is positional: the first
    argument passed to ``f2`` becomes ``b`` here, the second ``a``.
    """
    return f1(a, b)
a = 3
b = 4
f2(a,b)
#
def accion_central(precio, capacidad):
    """Print the action for a pumped-storage hydro plant.

    Generate when the electricity price is high (> 60 USD) and the
    reservoir holds more than 10%; pump when the price is low and the
    reservoir is below 90% full; otherwise do nothing.
    (Python 2 ``print`` statements.)
    """
    if precio>60:
        if capacidad>10:
            print "generar"   # high price, enough stored water: generate
        else:
            print "nada"      # high price but reservoir too low to generate
    else:
        if capacidad<90:
            print "bombear"   # cheap electricity: pump water uphill
        else:
            print "nada"      # reservoir (almost) full: nothing to do
p = float(raw_input("Ingrese precio de electricidad en USD:"))
estanque = float(raw_input("Ingrese porcentaje llenado del embalse [0-100]:"))
accion_central(p,estanque)
# Solución 1
def accion_central(precio, capacidad):
    """Solution 1: same decision logic, nested conditionals.

    Generate when price > 60 and reservoir > 10%; when the price is low,
    pump unless the reservoir is above 90% full.
    (Python 2 ``print`` statements.)
    """
    if precio>60:
        if capacidad>10:
            print "Generar"   # high price, enough water: generate
        else:
            print "Nada"      # high price but reservoir too low
    else:
        if capacidad>90:
            print "Nada"      # reservoir above 90%: stop pumping
        else:
            print "Bombear"   # cheap electricity: pump
    return
p = float(raw_input("Ingrese precio de electricidad en USD: "))
estanque = float(raw_input("Ingrese porcentaje llenado del embalse [0-100]: "))
accion_central(p,estanque)
# Solución 2
def accion_central(precio, capacidad):
    """Solution 2: same decision flattened into compound conditions.

    Generate when price > 60 and reservoir > 10%; pump when price <= 60
    and reservoir <= 90%; otherwise do nothing.
    (Python 2 ``print`` statements.)
    """
    if precio>60 and capacidad>10:
        print "Generar"
    elif precio<=60 and capacidad<=90:
        print "Bombear"
    else:
        print "Nada"   # either price high with empty reservoir, or price low with full reservoir
    return
p = float(raw_input("Ingrese precio de electricidad en USD: "))
estanque = float(raw_input("Ingrese porcentaje llenado del embalse [0-100]: "))
accion_central(p,estanque)
# Solucion de los alumnos
def es_bisiesto(anno):
    """Return True if *anno* is a leap year in the Gregorian calendar.

    A year is a leap year when it is divisible by 4, except century
    years (divisible by 100), which are leap years only when also
    divisible by 400.

    Parameters
    ----------
    anno : int
        Year to test.
    """
    # Previously a "FIX ME" stub that always returned False.
    return anno % 4 == 0 and (anno % 100 != 0 or anno % 400 == 0)
year = int(raw_input("Ingrese un año: "))
print es_bisiesto(year)
# Solucion 1
def es_bisiesto(anno):
    """Return True when *anno* is a leap year (Gregorian rules),
    testing the divisibility cases with early returns."""
    if anno % 400 == 0:
        return True      # e.g. 2000
    if anno % 100 == 0:
        return False     # e.g. 1900: century not divisible by 400
    return anno % 4 == 0
year = int(raw_input("Ingrese un año: "))
print es_bisiesto(year)
# Solucion 2
def es_bisiesto(anno):
    """Return True when *anno* satisfies the Gregorian leap-year rule."""
    divisible_por_4 = anno % 4 == 0
    es_siglo = anno % 100 == 0
    divisible_por_400 = anno % 400 == 0
    return (divisible_por_4 and not es_siglo) or divisible_por_400
year = int(raw_input("Ingrese un año: "))
print es_bisiesto(year)
# Solucion 3
def es_bisiesto(anno):
    """Gregorian leap-year test, split into the two qualifying cases."""
    if anno % 400 == 0:
        return True
    return anno % 4 == 0 and anno % 100 != 0
year = int(raw_input("Ingrese un año: "))
print es_bisiesto(year)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download and Prep NASA's Turbofan Engine Degradation Simulation (PHM08 Challenge) Data Set
Step2: Read training data into a DataFrame.
Step3: Create training data with 30 random engines
Step4: Read test data into a DataFrame.
Step5: Connect to SAS Viya CAS Engine
Step6: Load pandas DataFrames into CAS
Step7: Check details of loaded tables.
Step8: Import SVDD action set
Step9: Create SVDD model for training data
Step10: Score SVDD astore against scoring data
Step11: Save SVDD astore for use in SAS Event Stream Processing
|
<ASSISTANT_TASK:>
Python Code:
import os
import matplotlib.pyplot as plt
import pandas as pd
import swat # SAS Viya Python interface
%matplotlib inline
DATA_URL = 'https://ti.arc.nasa.gov/m/project/prognostic-repository/Challenge_Data.zip'
DATA_DIR = '.'
train_tsv = os.path.join(DATA_DIR, 'train.txt')
test_tsv = os.path.join(DATA_DIR, 'test.txt')
if not os.path.isfile(train_tsv) or not os.path.isfile(test_tsv):
import zipfile
from six.moves import urllib
try:
filename, headers = urllib.request.urlretrieve(DATA_URL)
with zipfile.ZipFile(filename, 'r') as data_zip:
data_zip.extract('train.txt', DATA_DIR)
data_zip.extract('test.txt', DATA_DIR)
finally:
urllib.request.urlcleanup()
# Create list of x1-x24
x = ['x%s' % i for i in range(1, 25)]
df = pd.read_table(train_tsv, delim_whitespace=True, names=['engine', 'cycle'] + x)
df.head()
train = df[df['engine'].isin([7, 28, 32, 38, 40, 51, 65, 84, 90, 95, 99, 107,
120, 124, 135, 137, 138, 148, 151, 160, 166, 178,
182, 188, 197, 199, 200, 207, 210, 211])]
# Keep first 50 observations per engine to train SVDD
train = train[train['cycle'] <= 50]
train['index'] = train.index
train.tail()
df = pd.read_table('test.txt', delim_whitespace=True, names=['engine', 'cycle'] + x)
# create a scoring data set with 9 random engines from the test data set
df['index'] = df.index
score = df[df['engine'].isin([1, 8, 22, 53, 63, 86, 102, 158, 170, 202])]
score.tail()
s = swat.CAS('localhost', 5570)
train_tbl = s.upload_frame(train, casout=dict(name='train', replace=True))
score_tbl = s.upload_frame(score, casout=dict(name='score', replace=True))
s.tableinfo()
s.loadactionset('svdd')
# Run svdd.svddTrain action set on training data
ysvdd_state = s.CASTable('ysvddstate', replace=True)
state_s = s.CASTable('state_s', replace=True)
train_tbl.svdd.svddtrain(gauss=11,
solver='actset',
inputs=x,
savestate=ysvdd_state,
output=dict(casout=state_s),
id='index')
sv = state_s.to_frame()
sv
# Load astore action set
s.loadactionset('astore')
# Score resulting SVDD astore (ysvddstate) against the scoring data (score) and output results (svddscored)
svdd_scored = s.CASTable('svddscored', replace=True)
score_tbl.astore.score(rstore=ysvdd_state, out=svdd_scored)
# Create local dataframe of scored data to plot using Matplotlib
output = svdd_scored.to_frame()
output.head()
# Add SVDD scored values to original score DataFrame for plotting purposes
df = score.merge(output, how='left')
df.head()
df = df.loc[df['engine'] < 150]
for index, group in df.groupby('engine'):
group.plot(x='cycle', y='_SVDDDISTANCE_', title=index, label='engine', figsize=(15, 4))
plt.show()
# Download SVDD astore for use in SAS Event Stream Processing (ESP)
results = s.astore.download(rstore=ysvdd_state)
# Check details of loaded data
s.tableinfo()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Task
Step2: We can use this to extract the desired columns if we wish.
Step3: Now we can do the assignment. Note that we use the .loc operator to avoid a warning about "trying to set values on a copy of a slice from a DataFrame" which would happen if we used for example the following expression
Step4: The resulting DataFrame does indeed have the values yyy and zzz filled in column colA.
|
<ASSISTANT_TASK:>
Python Code:
df = pd.DataFrame({
'colA': ['aaa', NaN, NaN, NaN, 'bbb', 'ccc'],
'colB': ['xxx', 'yyy', NaN, 'zzz', NaN, 'www'],
#'colC': [NaN, 3, NaN, 1, 0, 9]
})
df
cond = df.colA.isnull() & ~df.colB.isnull()
cond
df[cond]
df.loc[cond, 'colA'] = df.loc[cond, 'colB']
df
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Export blocks to a VTK file
Step2: The results can be ploted in an external viewer, for example mayavi or paraview
|
<ASSISTANT_TASK:>
Python Code:
# load some modules
import pandas as pd
import matplotlib.pylab as plt
import numpy as np
import pygslib
# see block model help
help(pygslib.blockmodel)
# Create an empty block model
mymodel=pygslib.blockmodel.Blockmodel(nx=5,ny=5,nz=5,xorg=-6,yorg=-6,zorg=-6,dx=3,dy=3,dz=3)
# there is no data, it is empty
print mymodel.bmtable
# Create a model from external DataFrame.
# In this case IJK or IX, IY, IZ or XC, YC, ZC are required
bmtable=pd.DataFrame({'IJK':np.array([1,2,3,4], dtype=np.int32)})
mymodel.set_blocks(bmtable)
print mymodel.bmtable
bmtable.dtypes
# Calculate row, column and level indices from IJK
mymodel.calc_ixyz_fromijk(overwrite=False)
mymodel.calc_ixyz_fromijk(overwrite=True) # if IX, IY, IZ exist we need to overwrite
print mymodel.bmtable
# Removing data from block model
mymodel.delete_blocks()
print mymodel.bmtable
# Creating a full block model with IJK
mymodel.create_IJK(overwrite=True)
print mymodel.bmtable.tail()
# Calculate row, column and level indices from IJK
mymodel.calc_ixyz_fromijk(overwrite=True)
print mymodel.bmtable.tail()
# Calculate coordinates from indices
mymodel.calc_xyz_fromixyz(overwrite=True)
print mymodel.bmtable.tail()
mymodel.blocks2vtkRectilinearGrid('blocks')
mymodel.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exploring the data
Step2: A quick exploratory query yields that the dataset consists of over 138 thousand users, nearly 27 thousand movies, and a little more than 20 million ratings, confirming that the data has been loaded successfully.
Step3: On examining the first few movies using the query following query, we can see that the genres column is a formatted string
Step4: We can parse the genres into an array and rewrite the table as follows
Step5: Matrix factorization
Step6: Note that we create a model as usual, except that the model_type is matrix_factorization and that we have to identify which columns play what roles in the collaborative filtering setup.
Step7: Now, we get faster convergence (three iterations instead of five), and a lot less overfitting. Here are our results
Step8: When we did that, we discovered that the evaluation loss was lower (0.97) with num_factors=16 than with num_factors=36 (1.67) or num_factors=24 (1.45). We could continue experimenting, but we are likely to see diminishing returns with further experimentation. So, let’s pick this as the final matrix factorization model and move on.
Step9: Filtering out already rated movies
Step10: For this user, this happens to yield the same set of movies -- the top predicted ratings didn’t include any of the movies the user has already seen.
Step11: Batch predictions for all users and movies
|
<ASSISTANT_TASK:>
Python Code:
import os
PROJECT = "your-project-here" # REPLACE WITH YOUR PROJECT ID
# Do not change these
os.environ["PROJECT"] = PROJECT
%%bash
rm -r bqml_data
mkdir bqml_data
cd bqml_data
curl -O 'http://files.grouplens.org/datasets/movielens/ml-20m.zip'
unzip ml-20m.zip
yes | bq rm -r $PROJECT:movielens
bq --location=US mk --dataset \
--description 'Movie Recommendations' \
$PROJECT:movielens
bq --location=US load --source_format=CSV \
--autodetect movielens.ratings ml-20m/ratings.csv
bq --location=US load --source_format=CSV \
--autodetect movielens.movies_raw ml-20m/movies.csv
%%bigquery --project $PROJECT
SELECT *
FROM movielens.ratings
LIMIT 10
%%bigquery --project $PROJECT
SELECT
COUNT(DISTINCT userId) numUsers,
COUNT(DISTINCT movieId) numMovies,
COUNT(*) totalRatings
FROM movielens.ratings
%%bigquery --project $PROJECT
SELECT *
FROM movielens.movies_raw
WHERE movieId < 5
%%bigquery --project $PROJECT
CREATE OR REPLACE TABLE movielens.movies AS
SELECT * REPLACE(SPLIT(genres, "|") AS genres)
FROM movielens.movies_raw
%%bigquery --project $PROJECT
SELECT *
FROM movielens.movies
WHERE movieId < 5
%%bigquery --project $PROJECT
CREATE OR REPLACE MODEL movielens.recommender
options(model_type='matrix_factorization',
user_col='userId', item_col='movieId', rating_col='rating')
AS
SELECT
userId, movieId, rating
FROM movielens.ratings
%%bigquery --project $PROJECT
SELECT *
-- Note: remove cloud-training-demos if you are using your own model:
FROM ML.TRAINING_INFO(MODEL `cloud-training-demos.movielens.recommender`)
%%bigquery --project $PROJECT
CREATE OR REPLACE MODEL movielens.recommender_l2
options(model_type='matrix_factorization',
user_col='userId', item_col='movieId',
rating_col='rating', l2_reg=0.2)
AS
SELECT
userId, movieId, rating
FROM movielens.ratings
%%bigquery --project $PROJECT
SELECT *
-- Note: remove cloud-training-demos if you are using your own model:
FROM ML.TRAINING_INFO(MODEL `cloud-training-demos.movielens.recommender_l2`)
%%bigquery --project $PROJECT
CREATE OR REPLACE MODEL movielens.recommender_16
options( #TODO: Insert paramters to make a 16 factor matrix factorization model
) AS
SELECT
userId, movieId, rating
FROM movielens.ratings
%%bigquery --project $PROJECT
SELECT *
-- Note: remove cloud-training-demos if you are using your own model:
FROM ML.TRAINING_INFO(MODEL `cloud-training-demos.movielens.recommender_16`)
%%bigquery --project $PROJECT
SELECT * FROM
ML.PREDICT(MODEL `cloud-training-demos.movielens.recommender_16`, (
SELECT
movieId, title, 903 AS userId
FROM movielens.movies, UNNEST(genres) g
WHERE g = 'Comedy'
))
ORDER BY predicted_rating DESC
LIMIT 5
%%bigquery --project $PROJECT
SELECT * FROM
ML.PREDICT(MODEL `cloud-training-demos.movielens.recommender_16`, (
WITH seen AS (
SELECT ARRAY_AGG(movieId) AS movies
FROM movielens.ratings
WHERE userId = 903
)
SELECT
movieId, title, 903 AS userId
FROM movielens.movies, UNNEST(genres) g, seen
WHERE # TODO: Complete this WHERE to remove seen movies.
))
ORDER BY predicted_rating DESC
LIMIT 5
%%bigquery --project $PROJECT
SELECT * FROM
ML.PREDICT(MODEL `cloud-training-demos.movielens.recommender_16`, (
SELECT
96481 AS movieId,
(SELECT title FROM movielens.movies WHERE movieId=96481) title,
userId
FROM
# TODO: Select all users
))
ORDER BY predicted_rating DESC
LIMIT 5
%%bigquery --project $PROJECT
SELECT *
FROM ML.RECOMMEND(MODEL `cloud-training-demos.movielens.recommender_16`)
LIMIT 10
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We begin by defining two <tt>NumPy</tt> arrays a and b that are each filled with a million random numbers.
Step2: Next, we compute the <em style="color
Step3: To compare this time with time that is needed if a and b are stored as lists instead, we convert a and b to ordinary Python lists.
Step4: Next, we measure the time needed to compute the same dot product with an explicit loop over the plain Python lists.
Step5: We notice that <tt>NumPy</tt> based computation is much faster than the list based computation. Similar observations can be made when a function is applied to all elements of an array. For big arrays, using the vectorized functions offered by <tt>NumPy</tt> is usually much faster than applying the function to all elements of a list.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
a = np.random.rand(1_000_000)
b = np.random.rand(1_000_000)
%%timeit
a @ b
la = list(a)
lb = list(b)
%%timeit
mysum = 0
for i in range(len(la)):
mysum += la[i] * lb[i]
import math
%%timeit
for i, x in enumerate(la):
lb[i] = math.sin(x)
%%timeit
b = np.sin(a)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Numpy array basics
Step2: Under the hood
Step3: You can check whether an array actually owns its data by looking at its flags (you should understand both differences in the two flag settings)
Step4: Array creation
Step5: (see also
Step6: Reshape must not change the number of elements within the array.
Step7: (see
Step8: (see
|
<ASSISTANT_TASK:>
Python Code:
# imports
import numpy as np # It will be used a lot, so the shorthand is helpful.
import matplotlib.pyplot as plt # Same here.
%matplotlib inline
# these can be useful if you plan on using the respective functions a lot:
np.random.seed(42) # Seeding is important to replicate results when using random numbers.
rnd = np.random.random
sin = np.sin # Be careful to no write "sin = np.sin()"! Why?
cos = np.cos
RAD2DEG = 180.0/np.pi # Constants for quick conversion between radians (used by sin/cos) and degree
DEG2RAD = np.pi/180.0
m = np.array([[1,2,3],
[4,5,6],
[7,8,9]], dtype=np.int32) # np.float32, np.float64, np.complex64, np.complex128
print m
print 'ndim: ', m.ndim, '\nshape:', m.shape, '\nsize: ', m.size, '\ndtype:', m.dtype
s = m[1]
print 'BEFORE'
print s, 'slice', '\n'
print m, '\n'
s[0] = 0
print 'AFTER'
print s, 'slice' '\n'
print m, '\n'
print m.flags, '\n'
print s.flags
# helper function for examples below; plots the graphical depiction of a given numpy array
def showMatrix(X):
Y = np.array(np.array(X, ndmin=2)) # 1D -> 2D
vmin = min(np.min(Y), 0)
vmax = max(np.max(Y), 1)
plt.imshow(Y, interpolation='none', vmin=vmin, vmax=vmax, cmap=plt.cm.get_cmap('Blues'))
Z = np.zeros(9)
showMatrix(Z)
Z = np.zeros((5,9))
showMatrix(Z)
Z = np.ones(9)
showMatrix(Z)
Z = np.ones((5,9))
showMatrix(Z)
Z = np.array( [0,0,0,0,0,0,0,0,0] )
showMatrix(Z)
Z = np.array( [[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0]] )
showMatrix(Z)
Z = np.arange(9) # the numpy arange function also allows floating point arguments
showMatrix(Z)
Z = np.arange(5*9).reshape(5,9)
showMatrix(Z)
Z = np.random.uniform(0,1,9) # args: min, max, no. of elements
showMatrix(Z)
Z = np.random.uniform(0, 1, (5, 9))
showMatrix(Z)
# single element
Z = np.zeros((5, 9))
Z[1,1] = 1
showMatrix(Z)
# single row
Z = np.zeros((5, 9))
Z[1,:] = 1
showMatrix(Z)
# single column
Z = np.zeros((5, 9))
Z[:,1] = 1
showMatrix(Z)
# specific area
Z = np.zeros((5, 9))
Z[2:4,2:6] = 1 # for each dimension format is always: <from:to:step> (with step being optional)
showMatrix(Z)
# every second column
Z = np.zeros((5, 9))
Z[:,::2] = 1 # for each dimension format is always: <from:to:step> (with step being optional)
showMatrix(Z)
# indices can be negative
Z = np.arange(10)
print ">>> Z[-1]: ", Z[-1] # start indexing at the back
print ">>> Z[3:-3]:", Z[3:-3] # slice of array center
print ">>> Z[::-1]:", Z[::-1] # quickly reverse an array
#-#-# EXC_NUMPY: YOUR CODE HERE #-#-#
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The information about the class of each sample is stored in the target attribute of the dataset
Step4: scikit-learn interface
Step5: For a given scikit-learn estimator object named model, several methods are available. Irrespective of the type of estimator, there will be a fit method
Step6: We split the data into a training set and a testing set. By default, 25% of the data is reserved for testing. This is the first of multiple ways that we will see to do this.
Step7: The LogisticRegression model in scikit-learn employs a regularization coefficient C, which defaults to 1. The amount of regularization is lower with larger values of C.
Step8: We can bootstrap some confidence intervals
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_iris
iris = load_iris()
iris.keys()
n_samples, n_features = iris.data.shape
n_samples, n_features
iris.data[0]
iris.target
iris.target_names
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
x_index = 3
y_index = 2
# this formatter will label the colorbar with the correct target names
formatter = plt.FuncFormatter(lambda i, *args: iris.target_names[int(i)])
plt.scatter(iris.data[:, x_index], iris.data[:, y_index], c=iris.target)
plt.colorbar(ticks=[0, 1, 2], format=formatter)
plt.xlabel(iris.feature_names[x_index])
plt.ylabel(iris.feature_names[y_index])
from sklearn.decomposition import PCA
pca = PCA(n_components=2, whiten=True).fit(iris.data)
X_pca = pca.transform(iris.data)
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=iris.target)
plt.colorbar(ticks=[0, 1, 2], format=formatter)
var_explained = pca.explained_variance_ratio_ * 100
plt.xlabel('First Component: {0:.1f}%'.format(var_explained[0]))
plt.ylabel('Second Component: {0:.1f}%'.format(var_explained[1]))
class Estimator(object):
    """Sketch of the scikit-learn estimator interface.

    Illustrative only: ``some_fitting_method`` and ``make_prediction``
    are placeholders and are not defined here.  (The docstring triple
    quotes below were missing, which made the class a SyntaxError.)
    """

    def fit(self, X, y=None):
        """Fit model to data X (and y); return self to allow chaining."""
        self.some_attribute = self.some_fitting_method(X, y)
        return self

    def predict(self, X_test):
        """Make a prediction based on the passed features."""
        pred = self.make_prediction(X_test)
        return pred
import pandas as pd
vlbw = pd.read_csv("../data/vlbw.csv", index_col=0)
subset = vlbw[['ivh', 'gest', 'bwt', 'delivery', 'inout',
'pltct', 'lowph', 'pneumo', 'twn', 'apg1']].dropna()
# Extract response variable
y = subset.ivh.replace({'absent':0, 'possible':1, 'definite':1})
# Standardize some variables
X = subset[['gest', 'bwt', 'pltct', 'lowph']]
X0 = (X - X.mean(axis=0)) / X.std(axis=0)
# Recode some variables
X0['csection'] = subset.delivery.replace({'vaginal':0, 'abdominal':1})
X0['transported'] = subset.inout.replace({'born at Duke':0, 'transported':1})
X0[['pneumo', 'twn', 'apg1']] = subset[['pneumo', 'twn','apg1']]
X0.head()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X0, y)
from sklearn.linear_model import LogisticRegression
lrmod = LogisticRegression(C=1000)
lrmod.fit(X_train, y_train)
pred_train = lrmod.predict(X_train)
pred_test = lrmod.predict(X_test)
pd.crosstab(y_train, pred_train,
rownames=["Actual"], colnames=["Predicted"])
pd.crosstab(y_test, pred_test,
rownames=["Actual"], colnames=["Predicted"])
for name, value in zip(X0.columns, lrmod.coef_[0]):
print('{0}:\t{1:.2f}'.format(name, value))
import numpy as np
n = 1000
boot_samples = np.empty((n, len(lrmod.coef_[0])))
for i in np.arange(n):
boot_ind = np.random.randint(0, len(X0), len(X0))
y_i, X_i = y.values[boot_ind], X0.values[boot_ind]
lrmod_i = LogisticRegression(C=1000)
lrmod_i.fit(X_i, y_i)
boot_samples[i] = lrmod_i.coef_[0]
boot_samples.sort(axis=0)
boot_se = boot_samples[[25, 975], :].T
coefs = lrmod.coef_[0]
plt.plot(coefs, 'r.')
for i in range(len(coefs)):
plt.errorbar(x=[i,i], y=boot_se[i], color='red')
plt.xlim(-0.5, 8.5)
plt.xticks(range(len(coefs)), X0.columns.values, rotation=45)
plt.axhline(0, color='k', linestyle='--')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In addition to the simulation parameters, we start with an initial seed of concentration data. Unlike our other analytical strategies there are no coefficients to compute, no functions to fit. The data could be generated from a function or from measurements. Here we choose a sin function as our initial condition.
Step2: Set up other simulation parameters
Step3: Our choice of timestep is restricted
Step4: You can think of the 2D array as having one space axis (the first index, we will use i for this one) and one time axis (the second index, we will use j for this one).
Step5: With our initial conditions in place we need to develop the computational steps to advance our solution in time. The PDE we are solving (with a constant diffusion coefficient) is
Step7: DIY
Step8: DIY
Step9: DIY
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
numberOfPoints = 100
numberOfIterations = 1000
lengthOfDomain = 1.0
dx = lengthOfDomain/numberOfPoints
xPoints = np.linspace(0.0, lengthOfDomain, numberOfPoints)
initialCondition = np.sin(xPoints*np.pi/lengthOfDomain)
def plotIC():
    """Plot the initial concentration profile as red markers.

    Reads the module-level arrays ``xPoints`` (spatial grid) and
    ``initialCondition`` (seed concentration) defined in earlier cells.
    """
    fig, ax = plt.subplots(figsize=(10,5))
    ax.plot(xPoints, initialCondition, 'ro')   # red circles at each grid point
    ax.set_xlabel(r'Distance $x$')
    ax.set_ylabel(r'Concentration $c(x,t)$')
    ax.set_title(r'Initial Conditions')
    plt.show()
    return
plotIC()
diffusionCoefficient = 10.0
dt = dx**2/(4*diffusionCoefficient)
arrayWithAllTheData = np.zeros((numberOfPoints,numberOfIterations), dtype='float32')
arrayWithAllTheData[:,0] = initialCondition
# Note the counting for j in this loop. You may wish to print out
# the values of i,j to help build this operation.
for j in range(1,numberOfIterations):
for i in range(1,numberOfPoints-1):
arrayWithAllTheData[i,j] = 0 # What should you put here?
%matplotlib inline
import numpy as np
from ipywidgets import interact, fixed
import matplotlib.pyplot as plt
def plotArray(xPoints, dataArray, rowID=0):
This function in conjunction with interact() permits
inspection of the contents of an array, row by row. This
is useful for some small tasks such as examining the results
of a PDE solution.
x = xPoints
y = dataArray[:,rowID]
fig = plt.figure(figsize=(7,4))
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
axes.set_ylim(0,1)
axes.plot(x, y, 'ro', label=r"$c(x,t)$")
axes.legend()
axes.grid(False)
plt.show()
return
interact(plotArray,
xPoints=fixed(xPoints),
dataArray=fixed(arrayWithAllTheData),
rowID=(0,numberOfIterations-1,1), );
# Your solver code goes here.
# Your solver code goes here.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: We use the same setup here as we do in the 'Simulating Experimental Fluorescence Binding Data' notebook.
Step3: Now make this a fluorescence experiment
Step4: That works, but the equilibration seems to happen quite late in our sampling! Let's look at some of the other parameters.
Step5: Now let's see if we can get better results using the newly implemented emcee option.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
import seaborn as sns
%pylab inline
# We define a Kd,
Kd = 2e-9 # M
# a protein concentration,
Ptot = 1e-9 * np.ones([12],np.float64) # M
# and a gradient of ligand concentrations for our experiment.
Ltot = 20.0e-6 / np.array([10**(float(i)/2.0) for i in range(12)]) # M
def two_component_binding(Kd, Ptot, Ltot):
    """Solve the exact two-component (P + L <-> PL) binding equilibrium.

    (The original docstring lost its triple quotes, making the function
    a SyntaxError; restored here.)

    Parameters
    ----------
    Kd : float
        Dissociation constant (same concentration units as Ptot/Ltot).
    Ptot : float or ndarray
        Total protein concentration.
    Ltot : float or ndarray
        Total ligand concentration.

    Returns
    -------
    [P, L, PL] : list
        Free protein, free ligand, and complex concentrations.
    """
    # Physical root of the quadratic mass-balance equation for the complex.
    PL = 0.5 * ((Ptot + Ltot + Kd) - np.sqrt((Ptot + Ltot + Kd)**2 - 4 * Ptot * Ltot))
    P = Ptot - PL  # free protein remaining
    L = Ltot - PL  # free ligand remaining
    return [P, L, PL]
[L, P, PL] = two_component_binding(Kd, Ptot, Ltot)
# y will be complex concentration
# x will be total ligand concentration
plt.semilogx(Ltot,PL, 'o')
plt.xlabel('$[L]_{tot}$ / M')
plt.ylabel('$[PL]$ / M')
plt.ylim(0,1.3e-9)
plt.axhline(Ptot[0],color='0.75',linestyle='--',label='$[P]_{tot}$')
plt.legend();
# Making max 1400 relative fluorescence units, and scaling all of PL (complex concentration)
# to that, adding some random noise
npoints = len(Ltot)
sigma = 10.0 # size of noise
F_PL_i = (1400/1e-9)*PL + sigma * np.random.randn(npoints)
# y will be complex concentration
# x will be total ligand concentration
plt.semilogx(Ltot,F_PL_i, 'ro')
plt.xlabel('$[L]_{tot}$ / M')
plt.ylabel('$Fluorescendce$')
plt.legend();
#Let's add an F_background just so we don't ever go below zero
F_background = 40
#We also need to model fluorescence for our ligand
F_L_i = F_background + (.4/1e-8)*Ltot + sigma * np.random.randn(npoints)
#Let's also add these to our complex fluorescence readout
F_PL_i = F_background + ((1400/1e-9)*PL + sigma * np.random.randn(npoints)) + ((.4/1e-8)*L + sigma * np.random.randn(npoints))
# y will be complex concentration
# x will be total ligand concentration
plt.semilogx(Ltot,F_PL_i, 'ro')
plt.semilogx(Ltot,F_L_i, 'ko')
plt.xlabel('$[L]_{tot}$ / M')
plt.ylabel('$Fluorescence$')
plt.legend();
# We know errors from our pipetting instruments.
P_error = 0.35
L_error = 0.08
assay_volume = 100e-6 # assay volume, L
dPstated = P_error * Ptot
dLstated = L_error * Ltot
# Now we'll use our Bayesian modeling scheme from assaytools.
from assaytools import pymcmodels
pymc_model = pymcmodels.make_model(Ptot, dPstated, Ltot, dLstated,
top_complex_fluorescence=F_PL_i,
top_ligand_fluorescence=F_L_i,
use_primary_inner_filter_correction=True,
use_secondary_inner_filter_correction=True,
assay_volume=assay_volume, DG_prior='uniform')
mcmc = pymcmodels.run_mcmc(pymc_model)
import matplotlib.patches as mpatches #this is for plotting with color patches
def mcmc_three_plots(pymc_model,mcmc,Lstated):
    """Summarize an MCMC run of the binding model in a three-panel figure.

    Panels: (1) posterior-predictive fluorescence traces overlaid on the
    observed complex and ligand data, (2) histogram/KDE of the DeltaG
    posterior with its [2.5, 97.5] percentile interval marked, and
    (3) the raw DeltaG trace with the detected equilibration point.

    Parameters
    ----------
    pymc_model : pymc model
        Model built by assaytools' pymcmodels.make_model; must expose the
        'top_complex_fluorescence' and 'top_ligand_fluorescence' observables.
    mcmc : pymc.MCMC
        Sampled MCMC object with DeltaG and fluorescence-model traces.
    Lstated : array-like
        Stated total ligand concentrations (M), used as the x axis.

    Returns
    -------
    [t, interval, hist, bin_edges, binwidth] : list
        Equilibration sample index, the [2.5, 50, 97.5] DeltaG percentiles,
        and the histogram pieces used for the middle panel.

    Notes
    -----
    Relies on module-level ``Kd`` (the true dissociation constant) for the
    vertical reference line in the middle panel.
    """
    sns.set(style='white')
    sns.set_context('talk')
    # Discard pre-equilibration samples before summarizing the posterior.
    import pymbar
    [t,g,Neff_max] = pymbar.timeseries.detectEquilibration(mcmc.DeltaG.trace())
    interval= np.percentile(a=mcmc.DeltaG.trace()[t:], q=[2.5, 50.0, 97.5])
    # NOTE(review): np.histogram's ``normed=`` keyword was removed in
    # NumPy 1.24; ``density=True`` is the modern equivalent.
    [hist,bin_edges] = np.histogram(mcmc.DeltaG.trace()[t:],bins=40,normed=True)
    binwidth = np.abs(bin_edges[0]-bin_edges[1])
    #set colors for 95% interval: purple inside the interval, gray outside
    clrs = [(0.7372549019607844, 0.5098039215686274, 0.7411764705882353) for xx in bin_edges]
    idxs = bin_edges.argsort()
    idxs = idxs[::-1]
    gray_before = idxs[bin_edges[idxs] < interval[0]]
    gray_after = idxs[bin_edges[idxs] > interval[2]]
    for idx in gray_before:
        clrs[idx] = (.5,.5,.5)
    for idx in gray_after:
        clrs[idx] = (.5,.5,.5)
    plt.clf();
    plt.figure(figsize=(12,3));
    # Panel 1: posterior-predictive traces (thinned 10x) over the observed data.
    plt.subplot(131)
    property_name = 'top_complex_fluorescence'
    # NOTE(review): ``complex`` shadows the builtin of the same name; it is
    # local to this function so callers are unaffected.
    complex = getattr(pymc_model, property_name)
    property_name = 'top_ligand_fluorescence'
    ligand = getattr(pymc_model, property_name)
    for top_complex_fluorescence_model in mcmc.top_complex_fluorescence_model.trace()[::10]:
        plt.semilogx(Lstated, top_complex_fluorescence_model, marker='.',color='silver')
    for top_ligand_fluorescence_model in mcmc.top_ligand_fluorescence_model.trace()[::10]:
        plt.semilogx(Lstated, top_ligand_fluorescence_model, marker='.',color='lightcoral', alpha=0.2)
    plt.semilogx(Lstated, complex.value, 'ko',label='complex')
    plt.semilogx(Lstated, ligand.value, marker='o',color='firebrick',linestyle='None',label='ligand')
    #plt.xlim(.5e-8,5e-5)
    plt.xlabel('$[L]_T$ (M)');
    plt.yticks([])
    plt.ylabel('fluorescence');
    plt.legend(loc=0);
    # Panel 2: DeltaG posterior histogram + KDE, interval markers, and the
    # true value (black vertical line at log(Kd)).
    plt.subplot(132)
    plt.bar(bin_edges[:-1]+binwidth/2,hist,binwidth,color=clrs, edgecolor = "white");
    sns.kdeplot(mcmc.DeltaG.trace()[t:],bw=.4,color=(0.39215686274509803, 0.7098039215686275, 0.803921568627451),shade=False)
    plt.axvline(x=interval[0],color=(0.5,0.5,0.5),linestyle='--')
    plt.axvline(x=interval[1],color=(0.5,0.5,0.5),linestyle='--')
    plt.axvline(x=interval[2],color=(0.5,0.5,0.5),linestyle='--')
    plt.axvline(x=np.log(Kd),color='k')
    plt.xlabel('$\Delta G$ ($k_B T$)',fontsize=16);
    plt.ylabel('$P(\Delta G)$',fontsize=16);
    #plt.xlim(-15,-8)
    hist_legend = mpatches.Patch(color=(0.7372549019607844, 0.5098039215686274, 0.7411764705882353),
                       label = '$\Delta G$ = %.3g [%.3g,%.3g] $k_B T$'
                       %(interval[1],interval[0],interval[2]) )
    plt.legend(handles=[hist_legend],fontsize=10,loc=0,frameon=True);
    # Panel 3: full DeltaG trace; green points are pre-equilibration samples.
    plt.subplot(133)
    plt.plot(range(0,t),mcmc.DeltaG.trace()[:t], 'g.',label='equil. at %s'%t)
    plt.plot(range(t,len(mcmc.DeltaG.trace())),mcmc.DeltaG.trace()[t:], '.')
    plt.xlabel('MCMC sample');
    plt.ylabel('$\Delta G$ ($k_B T$)');
    plt.legend(loc=2);
    plt.tight_layout();
    return [t,interval,hist,bin_edges,binwidth]
Kd
# Use the print() function like the rest of the notebook (the statement form
# is Python 2 only).  The true Kd is a concentration, so the unit should be
# nM (nanomolar), not nm — presumably a typo; confirm against the Kd value.
print('Real Kd is 2 nM or %s k_B T.' % np.log(Kd))
[t,interval,hist,bin_edges,binwidth] = mcmc_three_plots(pymc_model,mcmc,Ltot)
well_area = 0.1586 # well area, cm^2 # half-area wells were used here
path_length = assay_volume / well_area
from assaytools import plots
plots.plot_mcmc_results(Ltot, Ptot, path_length, mcmc)
mcmc_emcee = pymcmodels.run_mcmc_emcee(pymc_model)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Network Traffic Forecasting with AutoTS
Step3: Download raw dataset and load into dataframe
Step4: Below are some example records of the data
Step5: Data pre-processing
    Step6: Here, we drop weeks with more than 3 consecutive missing values and fill the remaining missing values.
Step7: Plot the data to see how the KPI's look like
Step8: Time series forecasting with AutoTS
    Step9: Then we initialize an AutoTSTrainer.
Step10: We can set some searching presets such as look_back which indicates the history time period we want to use for forecasting.
    Step11: We need to split the data frame into train, validation and test data frames before training. You can use train_val_test_split as an easy way to do this.
Step12: Then we fit on train data and validation data.
Step13: We get a TSPipeline after training. Let's print the hyper paramters selected.
Step14: Use it to do prediction, evaluation or incremental fitting.
Step15: plot actual and prediction values for AvgRate KPI
Step16: Calculate mean square error and the symetric mean absolute percentage error.
Step17: You can save the pipeline to file and reload it to do incremental fitting or others.
Step18: You can stop RayOnSpark after auto training.
Step19: Next, we demonstrate how to do incremental fitting with your saved pipeline file.
Step20: Then do incremental fitting with TSPipeline.fit().We use validation data frame as additional data for demonstration. You can use your new data frame.
Step21: predict and plot the result after incremental fitting.
Step22: Calculate mean square error and the symetric mean absolute percentage error.
|
<ASSISTANT_TASK:>
Python Code:
def get_drop_dates_and_len(df, allow_missing_num=3):
    """Find missing values and get records to drop.

    Parameters
    ----------
    df : pd.DataFrame
        Frame with a ``total`` column that may contain NaNs.
    allow_missing_num : int
        Runs of at most this many consecutive NaNs are tolerated.

    Returns
    -------
    (drop_datetimes, drop_len)
        Start timestamp and length (in samples) of each NaN run longer
        than ``allow_missing_num``.
    """
    # NOTE(review): restored the docstring's triple quotes — they were
    # missing, which made this function a SyntaxError as written.
    # Group consecutive NaNs together: the cumulative count of non-NaN
    # values stays constant across each NaN run.
    missing_num = df.total.isnull().astype(int).groupby(df.total.notnull().astype(int).cumsum()).sum()
    drop_missing_num = missing_num[missing_num > allow_missing_num]
    # The group key matches the positional index where the run begins
    # (exact when no earlier gap precedes it) — original behavior kept.
    drop_datetimes = df.iloc[drop_missing_num.index].index
    drop_len = drop_missing_num.values
    return drop_datetimes, drop_len
def rm_missing_weeks(start_dts, missing_lens, df):
    """Drop weeks that contain more than 3 consecutive missing values.

    If a run of missing values spans multiple weeks, all spanned weeks
    are removed.

    Parameters
    ----------
    start_dts : iterable of pd.Timestamp
        Start timestamp of each long missing run
        (as returned by ``get_drop_dates_and_len``).
    missing_lens : iterable of int
        Length of each run, in 2-hour samples.
    df : pd.DataFrame
        Frame indexed by timestamp at a 2-hour frequency
        (last sample of each day at 22:00).

    Returns
    -------
    pd.DataFrame
        ``df`` with the affected weeks dropped.
    """
    # NOTE(review): restored the docstring's triple quotes — they were
    # missing, which made this function a SyntaxError as written.
    for start_time, run_len in zip(start_dts, missing_lens):
        # Monday 00:00 of the week containing the run's start ...
        start = start_time - pd.Timedelta(days=start_time.dayofweek)
        start = start.replace(hour=0, minute=0, second=0)
        # ... and Sunday 22:00 of that same week (last 2-hour sample).
        start_week_end = start + pd.Timedelta(days=6)
        start_week_end = start_week_end.replace(hour=22, minute=0, second=0)
        end_time = start_time + run_len * pd.Timedelta(hours=2)
        if start_week_end < end_time:
            # The run spills into later week(s): extend the drop window to
            # the Sunday 22:00 of the week the run ends in.
            end = end_time + pd.Timedelta(days=6 - end_time.dayofweek)
            end = end.replace(hour=22, minute=0, second=0)
        else:
            end = start_week_end
        df = df.drop(df[start:end].index)
    return df
# plot the predicted values and actual values (for the test data)
def plot_result(test_df, pred_df, dt_col="datetime", value_col="AvgRate", look_back=1):
    """Plot predicted vs. actual `value_col` over time on a single figure.

    Parameters
    ----------
    test_df : pd.DataFrame
        Ground-truth test data containing ``dt_col`` and ``value_col``.
    pred_df : pd.DataFrame
        Predictions containing the same columns.
    dt_col : str
        Name of the datetime column used for the x axis.
    value_col : str
        Name of the value column to plot.
    look_back : int
        History window used by the model; the first ``look_back`` rows of
        ``test_df`` have no corresponding prediction and are skipped.
    """
    # Align the two series: skip the first `look_back` actuals and drop the
    # last prediction (presumably it extends past the test range — this
    # mirrors the original alignment; confirm against the forecaster).
    pred_value = pred_df[value_col][:-1].values
    true_value = test_df[value_col].values[look_back:]
    fig, axs = plt.subplots(figsize=(12, 5))
    axs.plot(pred_df[dt_col][:-1], pred_value, color='red', label='predicted values')
    axs.plot(test_df[dt_col][look_back:], true_value, color='blue', label='actual values')
    axs.set_title('the predicted values and actual values (for the test data)')
    plt.xlabel(dt_col)
    plt.xticks(rotation=45)
    plt.ylabel(value_col)
    plt.legend(loc='upper left')
    plt.show()
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
raw_df = pd.read_csv("data/data.csv")
raw_df.head()
df = pd.DataFrame(pd.to_datetime(raw_df.StartTime))
# we can find 'AvgRate' is of two scales: 'Mbps' and 'Gbps'
raw_df.AvgRate.str[-4:].unique()
# Unify AvgRate value
df['AvgRate'] = raw_df.AvgRate.apply(lambda x: float(x[:-4]) if x.endswith("Mbps") else float(x[:-4]) * 1000)
df["total"] = raw_df["total"]
df.set_index("StartTime", inplace=True)
full_idx = pd.date_range(start=df.index.min(), end=df.index.max(), freq='2H')
df = df.reindex(full_idx)
print("no. of n/a values:")
print(df.isna().sum())
drop_dts, drop_len = get_drop_dates_and_len(df)
df = rm_missing_weeks(drop_dts, drop_len, df)
df.ffill(inplace=True)
# AutoTS requires input data frame with a datetime column
df.index.name = "datetime"
df = df.reset_index()
df.head()
df.describe()
ax = df.plot(y='AvgRate',figsize=(12,5), title="AvgRate of network traffic data")
ax = df.plot(y='total',figsize=(12,5), title="total bytes of network traffic data")
# init RayOnSpark in local mode
from bigdl.dllib.nncontext import init_spark_on_local
from bigdl.orca.ray import OrcaRayContext
sc = init_spark_on_local(cores=4, spark_log_level="INFO")
ray_ctx = OrcaRayContext(sc=sc, object_store_memory="1g")
ray_ctx.init()
from bigdl.chronos.autots.deprecated.forecast import AutoTSTrainer
trainer = AutoTSTrainer(dt_col="datetime",
target_col="AvgRate",
horizon=1,
extra_features_col=None)
# look back in range from one week to 3 days to predict the next 2h.
look_back = (36, 84)
from bigdl.chronos.autots.deprecated.preprocessing.utils import train_val_test_split
train_df, val_df, test_df = train_val_test_split(df,
val_ratio=0.1,
test_ratio=0.1,
look_back=look_back[0])
from bigdl.chronos.autots.deprecated.config.recipe import LSTMGridRandomRecipe
%%time
ts_pipeline = trainer.fit(train_df, val_df,
recipe=LSTMGridRandomRecipe(
num_rand_samples=1,
epochs=1,
look_back=look_back,
batch_size=[64]),
metric="mse")
ts_pipeline.internal.config
pred_df = ts_pipeline.predict(test_df)
# plot the predicted values and actual values
plot_result(test_df, pred_df, dt_col="datetime", value_col="AvgRate", look_back=ts_pipeline.internal.config['past_seq_len'])
mse, smape = ts_pipeline.evaluate(test_df, metrics=["mse", "smape"])
print("Evaluate: the mean square error is", mse)
print("Evaluate: the smape value is", smape)
# save pipeline file
my_ppl_file_path = ts_pipeline.save("/tmp/saved_pipeline/my.ppl")
# stop
ray_ctx.stop()
sc.stop()
# load file
from bigdl.chronos.autots.deprecated.forecast import TSPipeline
loaded_ppl = TSPipeline.load(my_ppl_file_path)
# we use validation data frame as additional data for demonstration.
loaded_ppl.fit(val_df, epochs=2)
# predict results of test_df
new_pred_df = loaded_ppl.predict(test_df)
plot_result(test_df, new_pred_df, look_back=loaded_ppl.internal.config['past_seq_len'])
# evaluate test_df
mse, smape = loaded_ppl.evaluate(test_df, metrics=["mse", "smape"])
print("Evaluate: the mean square error is", mse)
print("Evaluate: the smape value is", smape)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <H2> Theoretical distribution</H2>
Step2: <H2>Empirical distribution</H2>
Step3: note that cells with ID <12 or > 87 receive almost zero conductance (check sigma)
Step4: <H2> Plot both together </H2>
    Step5: let's generate another distribution with mean = 0 and standard deviation of 10
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
from scipy.stats import norm
# Create a normal distribution
mu = 50
sigma = 10 # standard deviation
rv = norm(loc = mu, scale = sigma)
start = rv.ppf(0.00001)
stop = rv.ppf(0.99999)
x = np.linspace(start, stop, 100)
print(start, stop)
plt.plot(x, rv.pdf(x))
plt.xlabel('Cells IDs'), plt.ylabel('Conductance');
plt.xlim(0,100);
# generate empirical distribution
cell_idx = np.arange(int(start), int(stop))
mycond = [rv.pdf(i) for i in cell_idx]
print(mycond[50])
plt.bar( cell_idx, mycond, width=.85, facecolor='k', label='empirical');
plt.xlabel('Cells IDs'), plt.ylabel('Conductance');
rv.pdf(87)
plt.plot(x, rv.pdf(x), c='r')
plt.bar( cell_idx, mycond, width=.85, facecolor='k', label='empirical');
plt.xlabel('Cells IDs'), plt.ylabel('Conductance');
rv2 = norm(loc = 0, scale = 10)
start = rv2.ppf(0.0001)
stop = rv2.ppf(0.9999)
x = np.linspace(start, stop, 100)
mycond = [rv2.pdf(i) for i in x]
print(start, stop)
cell_idx = np.arange(int(start), int(stop))
mycond = [rv2.pdf(i) for i in cell_idx]
plt.plot(x, rv2.pdf(x), color = 'red')
plt.bar( cell_idx, mycond, width=.85, facecolor='k', label='empirical');
plt.xlabel('Cells IDs'), plt.ylabel('Conductance');
rv3 = norm(loc = 0, scale = 20)
start = rv3.ppf(0.0001)
stop = rv3.ppf(0.9999)
x = np.arange(start, stop)
plt.plot(x, rv3.pdf(x)/10, 'red')
print(start, stop)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now that we have factor values and returns, we can see what would happen if we ranked our equities based on factor values, and then entered the long and short positions.
Step2: Let's compute the returns if we go long the top basket and short the bottom basket.
Step3: Market Neutrality is Built-In
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# We'll generate a random factor
current_factor_values = np.random.normal(0, 1, 10000)
equity_names = ['Equity ' + str(x) for x in range(10000)]
# Put it into a dataframe
factor_data = pd.Series(current_factor_values, index = equity_names)
factor_data = pd.DataFrame(factor_data, columns=['Factor Value'])
# Take a look at the dataframe
factor_data.head(10)
# Now let's say our future returns are dependent on our factor values
future_returns = current_factor_values + np.random.normal(0, 1, 10000)
returns_data = pd.Series(future_returns, index=equity_names)
returns_data = pd.DataFrame(returns_data, columns=['Returns'])
# Put both the factor values and returns into one dataframe
data = returns_data.join(factor_data)
# Take a look
data.head(10)
# Rank the equities by factor value (ascending).
# DataFrame.sort was deprecated and later removed from pandas;
# sort_values is the supported API.
ranked_data = data.sort_values('Factor Value')
# Compute the returns of each basket
# Baskets of size 500, so we create an empty array of shape (10000/500)
# Integer division so the result can be used with np.zeros and range
# (true division yields a float under Python 3).
number_of_baskets = 10000 // 500
basket_returns = np.zeros(number_of_baskets)
for i in range(number_of_baskets):
    start = i * 500
    end = i * 500 + 500
    basket_returns[i] = ranked_data[start:end]['Returns'].mean()
# Plot the returns of each basket
plt.bar(range(number_of_baskets), basket_returns)
plt.ylabel('Returns')
plt.xlabel('Basket')
plt.legend(['Returns of Each Basket']);
# Spread between the top and bottom baskets (notebook cell output).
basket_returns[number_of_baskets-1] - basket_returns[0]
# We'll generate a random factor
current_factor_values = np.random.normal(0, 1, 10000)
equity_names = ['Equity ' + str(x) for x in range(10000)]
# Put it into a dataframe
factor_data = pd.Series(current_factor_values, index = equity_names)
factor_data = pd.DataFrame(factor_data, columns=['Factor Value'])
# Now let's say our future returns are dependent on our factor values
future_returns = -10 + current_factor_values + np.random.normal(0, 1, 10000)
returns_data = pd.Series(future_returns, index=equity_names)
returns_data = pd.DataFrame(returns_data, columns=['Returns'])
# Put both the factor values and returns into one dataframe
data = returns_data.join(factor_data)
# Rank the equities by factor value (ascending).
# DataFrame.sort was deprecated and later removed from pandas;
# sort_values is the supported API.
ranked_data = data.sort_values('Factor Value')
# Compute the returns of each basket
# Baskets of size 500, so we create an empty array of shape (10000/500
# Integer division so np.zeros/range receive an int under Python 3.
number_of_baskets = 10000 // 500
basket_returns = np.zeros(number_of_baskets)
for i in range(number_of_baskets):
    start = i * 500
    end = i * 500 + 500
    basket_returns[i] = ranked_data[start:end]['Returns'].mean()
# Spread between the top and bottom baskets (notebook cell output).
basket_returns[number_of_baskets-1] - basket_returns[0]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: SPA output
Step2: Speed tests
Step3: This numba test will only work properly if you have installed numba.
Step4: The numba calculation takes a long time the first time that it's run because it uses LLVM to compile the Python code to machine code. After that it's about 4-10 times faster depending on your machine. You can pass a numthreads argument to this function. The optimum numthreads depends on your machine and is equal to 4 by default.
|
<ASSISTANT_TASK:>
Python Code:
import datetime
# scientific python add-ons
import numpy as np
import pandas as pd
# plotting stuff
# first line makes the plots appear in the notebook
%matplotlib inline
import matplotlib.pyplot as plt
# finally, we import the pvlib library
import pvlib
import pvlib
from pvlib.location import Location
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
print(tus)
golden = Location(39.742476, -105.1786, 'America/Denver', 1830, 'Golden')
print(golden)
golden_mst = Location(39.742476, -105.1786, 'MST', 1830, 'Golden MST')
print(golden_mst)
berlin = Location(52.5167, 13.3833, 'Europe/Berlin', 34, 'Berlin')
print(berlin)
times = pd.date_range(start=datetime.datetime(2014,6,23), end=datetime.datetime(2014,6,24), freq='1Min')
times_loc = times.tz_localize(tus.pytz)
times
pyephemout = pvlib.solarposition.pyephem(times_loc, tus.latitude, tus.longitude)
spaout = pvlib.solarposition.spa_python(times_loc, tus.latitude, tus.longitude)
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
spaout['elevation'].plot(label='spa')
plt.legend(ncol=2)
plt.title('elevation')
print('pyephem')
print(pyephemout.head())
print('spa')
print(spaout.head())
plt.figure()
pyephemout['elevation'].plot(label='pyephem')
spaout['elevation'].plot(label='spa')
(pyephemout['elevation'] - spaout['elevation']).plot(label='diff')
plt.legend(ncol=3)
plt.title('elevation')
plt.figure()
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
spaout['elevation'].plot(label='spa')
(pyephemout['apparent_elevation'] - spaout['elevation']).plot(label='diff')
plt.legend(ncol=3)
plt.title('elevation')
plt.figure()
pyephemout['apparent_zenith'].plot(label='pyephem apparent')
spaout['zenith'].plot(label='spa')
(pyephemout['apparent_zenith'] - spaout['zenith']).plot(label='diff')
plt.legend(ncol=3)
plt.title('zenith')
plt.figure()
pyephemout['apparent_azimuth'].plot(label='pyephem apparent')
spaout['azimuth'].plot(label='spa')
(pyephemout['apparent_azimuth'] - spaout['azimuth']).plot(label='diff')
plt.legend(ncol=3)
plt.title('azimuth')
pyephemout = pvlib.solarposition.pyephem(times.tz_localize(golden.tz), golden.latitude, golden.longitude)
spaout = pvlib.solarposition.spa_python(times.tz_localize(golden.tz), golden.latitude, golden.longitude)
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
spaout['elevation'].plot(label='spa')
plt.legend(ncol=2)
plt.title('elevation')
print('pyephem')
print(pyephemout.head())
print('spa')
print(spaout.head())
pyephemout = pvlib.solarposition.pyephem(times.tz_localize(golden.tz), golden.latitude, golden.longitude)
ephemout = pvlib.solarposition.ephemeris(times.tz_localize(golden.tz), golden.latitude, golden.longitude)
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
ephemout['elevation'].plot(label='ephem')
plt.legend(ncol=2)
plt.title('elevation')
print('pyephem')
print(pyephemout.head())
print('ephem')
print(ephemout.head())
loc = berlin
pyephemout = pvlib.solarposition.pyephem(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
ephemout = pvlib.solarposition.ephemeris(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
ephemout['elevation'].plot(label='ephem')
ephemout['apparent_elevation'].plot(label='ephem apparent')
plt.legend(ncol=2)
plt.title('elevation')
print('pyephem')
print(pyephemout.head())
print('ephem')
print(ephemout.head())
loc = berlin
times = pd.DatetimeIndex(start=datetime.date(2015,3,28), end=datetime.date(2015,3,29), freq='5min')
pyephemout = pvlib.solarposition.pyephem(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
ephemout = pvlib.solarposition.ephemeris(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
ephemout['elevation'].plot(label='ephem')
plt.legend(ncol=2)
plt.title('elevation')
plt.figure()
pyephemout['azimuth'].plot(label='pyephem')
ephemout['azimuth'].plot(label='ephem')
plt.legend(ncol=2)
plt.title('azimuth')
print('pyephem')
print(pyephemout.head())
print('ephem')
print(ephemout.head())
loc = berlin
times = pd.DatetimeIndex(start=datetime.date(2015,3,30), end=datetime.date(2015,3,31), freq='5min')
pyephemout = pvlib.solarposition.pyephem(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
ephemout = pvlib.solarposition.ephemeris(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
ephemout['elevation'].plot(label='ephem')
plt.legend(ncol=2)
plt.title('elevation')
plt.figure()
pyephemout['azimuth'].plot(label='pyephem')
ephemout['azimuth'].plot(label='ephem')
plt.legend(ncol=2)
plt.title('azimuth')
print('pyephem')
print(pyephemout.head())
print('ephem')
print(ephemout.head())
loc = berlin
times = pd.DatetimeIndex(start=datetime.date(2015,6,28), end=datetime.date(2015,6,29), freq='5min')
pyephemout = pvlib.solarposition.pyephem(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
ephemout = pvlib.solarposition.ephemeris(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
ephemout['elevation'].plot(label='ephem')
plt.legend(ncol=2)
plt.title('elevation')
plt.figure()
pyephemout['azimuth'].plot(label='pyephem')
ephemout['azimuth'].plot(label='ephem')
plt.legend(ncol=2)
plt.title('azimuth')
print('pyephem')
print(pyephemout.head())
print('ephem')
print(ephemout.head())
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
ephemout['elevation'].plot(label='ephem')
ephemout['apparent_elevation'].plot(label='ephem apparent')
plt.legend(ncol=2)
plt.title('elevation')
plt.xlim(pd.Timestamp('2015-06-28 02:00:00+02:00'), pd.Timestamp('2015-06-28 06:00:00+02:00'))
plt.ylim(-10,10)
times_loc = times.tz_localize(loc.tz)
%%timeit
pyephemout = pvlib.solarposition.pyephem(times_loc, loc.latitude, loc.longitude)
#ephemout = pvlib.solarposition.ephemeris(times, loc)
%%timeit
#pyephemout = pvlib.solarposition.pyephem(times, loc)
ephemout = pvlib.solarposition.ephemeris(times_loc, loc.latitude, loc.longitude)
%%timeit
#pyephemout = pvlib.solarposition.pyephem(times, loc)
ephemout = pvlib.solarposition.get_solarposition(times_loc, loc.latitude, loc.longitude,
method='nrel_numpy')
%%timeit
#pyephemout = pvlib.solarposition.pyephem(times, loc)
ephemout = pvlib.solarposition.get_solarposition(times_loc, loc.latitude, loc.longitude,
method='nrel_numba')
%%timeit
#pyephemout = pvlib.solarposition.pyephem(times, loc)
ephemout = pvlib.solarposition.get_solarposition(times_loc, loc.latitude, loc.longitude,
method='nrel_numba', numthreads=16)
%%timeit
ephemout = pvlib.solarposition.spa_python(times_loc, loc.latitude, loc.longitude,
how='numba', numthreads=16)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note, this also holds for interference between waves of different amplitudes. I've verified this with beam simulations.
Step2: The step size $\Delta k$ is given by $\frac{2\pi}{(20\mu m)(1340)}$ based on pixel size and pixel count
Step3: So the horizontal axis scale should reflect these values. Note, they will not change with the setup so we can hard-code them into our graphing routines.
|
<ASSISTANT_TASK:>
Python Code:
from numpy.fft import *
import numpy
t = numpy.arange(0,100)
data = 5*numpy.sin(t) + 3*numpy.sin(0.5*t)
%pylab inline
plot(data)
fft_out = abs(fft.fft(data))
fft_out.max()
plot(fft_out)
from scipy.signal import find_peaks_cwt
peak_ind = find_peaks_cwt(fft_out, numpy.arange(1,3))
fft_out[peak_ind]
247/147.0
5/3.0
K_x = 2*pi/780e-9 * numpy.sin(0.005) # in radians per meter
delta_K = 2*pi/(20e-6*1340) # deltaK in radians per meter
166*delta_K # radians per meter
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download Data from 10x
Step2: Run Doublet Detection
Step3: Visualize Results
Step4: Doublets on umap
Step5: Number of predicted doublets at different threshold combinations
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import doubletdetection
import scanpy as sc
import matplotlib.pyplot as plt
sc.settings.n_jobs=8
sc.set_figure_params()
%matplotlib inline
adata = sc.read_10x_h5(
"pbmc_10k_v3_filtered_feature_bc_matrix.h5",
backup_url="https://cf.10xgenomics.com/samples/cell-exp/3.0.0/pbmc_10k_v3/pbmc_10k_v3_filtered_feature_bc_matrix.h5"
)
adata.var_names_make_unique()
# remove "empty" genes
sc.pp.filter_genes(adata, min_cells=1)
clf = doubletdetection.BoostClassifier(
n_iters=10,
clustering_algorithm="louvain",
standard_scaling=True,
pseudocount=0.1,
n_jobs=-1,
)
doublets = clf.fit(adata.X).predict(p_thresh=1e-16, voter_thresh=0.5)
doublet_score = clf.doublet_score()
adata.obs["doublet"] = doublets
adata.obs["doublet_score"] = doublet_score
f = doubletdetection.plot.convergence(clf, save='convergence_test.pdf', show=True, p_thresh=1e-16, voter_thresh=0.5)
sc.pp.normalize_total(adata)
sc.pp.log1p(adata)
sc.pp.highly_variable_genes(adata)
sc.tl.pca(adata)
sc.pp.neighbors(adata)
sc.tl.umap(adata)
sc.pl.umap(adata, color=["doublet", "doublet_score"])
sc.pl.violin(adata, "doublet_score")
f3 = doubletdetection.plot.threshold(clf, save='threshold_test.pdf', show=True, p_step=6)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If you look at the RTFD section on RasterModelGrid you'll notice that it contains the same information.
Step2: Note also that the ? works for attributes of classes (assuming they have some documentation).
|
<ASSISTANT_TASK:>
Python Code:
from landlab import RasterModelGrid
from landlab.components import LinearDiffuser
?RasterModelGrid
?LinearDiffuser
grid = RasterModelGrid((10, 10), xy_spacing=(3, 4))
?grid.add_ones
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: $\alpha$-CsCl ($Pm\overline{3}m$)
Step2: Compare it with the experimental XRD pattern below.
Step3: $\beta$-CsCl ($Fm\overline{3}m$)
Step4: Compare it with the experimental XRD pattern below.
|
<ASSISTANT_TASK:>
Python Code:
# Set up some imports that we will need
from pymatgen.core import Lattice, Structure
from pymatgen.analysis.diffraction.xrd import XRDCalculator
from IPython.display import Image, display
%matplotlib inline
# Create the alpha-CsCl structure: cubic lattice with Cs at the corner and
# Cl at the body center (fractional coordinates).
a = 4.209 #Angstrom, cubic lattice parameter
latt = Lattice.cubic(a)
structure = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
# Simulate and display the powder XRD pattern for this structure.
c = XRDCalculator()
c.show_plot(structure)
display(Image(filename=('./PDF - alpha CsCl.png')))
# Create CsCl structure
a = 6.923 #Angstrom
latt = Lattice.cubic(a)
structure = Structure(latt, ["Cs", "Cs", "Cs", "Cs", "Cl", "Cl", "Cl", "Cl"],
[[0, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5],
[0.5, 0.5, 0.5], [0, 0, 0.5], [0, 0.5, 0], [0.5, 0, 0]])
c.show_plot(structure)
display(Image(filename=('./PDF - beta CsCl.png')))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Approach 2
Step2: Speed test
Step3: Roughly 13 X slower than using C math directly
|
<ASSISTANT_TASK:>
Python Code:
%%cython -a
# cython: boundscheck=False
from math import sin, cos
cdef inline double versine(double x):
return 1.0 - cos(x)
def versine_array_py(double[:] x):
cdef int i, n = x.shape[0]
for i in range(n):
x[i] = versine(x[i])
%%cython -a
# cython: boundscheck=False
from libc.math cimport sin, cos
cdef inline double versine(double x):
return 1.0 - cos(x)
def versine_array_cy(double[:] x):
cdef int i, n = x.shape[0]
for i in range(n):
x[i] = versine(x[i])
import numpy
data = numpy.random.rand(10000)
%timeit versine_array_py(data)
data = numpy.random.rand(10000)
%timeit versine_array_cy(data)
from math import cos
def versine_array_pyonly(x):
    # Pure-Python baseline for the Cython timing comparison:
    # replace every element with its versine, 1 - cos(x), in place.
    for idx, val in enumerate(x):
        x[idx] = 1 - cos(val)
data = numpy.random.rand(10000)
%timeit versine_array_pyonly(data)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
Step2: Let's check out the first 100 characters, make sure everything is peachy. According to the American Book Review, this is the 6th best first line of a book ever.
Step3: And we can see the characters encoded as integers.
Step4: Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.
Step5: Making training mini-batches
Step6: Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
Step7: If you implemented get_batches correctly, the above output should look something like
Step8: LSTM Cell
Step9: RNN Output
Step10: Training loss
Step11: Optimizer
Step12: Build the network
Step13: Hyperparameters
Step14: Time for training
Step15: Saved checkpoints
Step16: Sampling
Step17: Here, pass in the path to a checkpoint and sample from the network.
|
<ASSISTANT_TASK:>
Python Code:
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
encoded[:100]
len(vocab)
def get_batches(arr, n_seqs, n_steps):
    '''Yield mini-batches of shape n_seqs x n_steps from arr.

    Each batch is a pair (x, y): y is x shifted left by one character,
    with x's first column wrapped around as y's last column.

    Arguments
    ---------
    arr: Array you want to make batches from
    n_seqs: Batch size, the number of sequences per batch
    n_steps: Number of sequence steps per batch
    '''
    chars_per_batch = n_seqs * n_steps
    n_batches = len(arr) // chars_per_batch
    # Trim the tail so the data divides evenly into full batches, then
    # lay it out with one long contiguous row per sequence.
    arr = arr[:n_batches * chars_per_batch].reshape((n_seqs, -1))
    for offset in range(0, arr.shape[1], n_steps):
        # The features
        x = arr[:, offset:offset + n_steps]
        # The targets: inputs shifted by one step, reusing the first
        # input column as the final target column.
        y = np.zeros_like(x)
        y[:, :-1] = x[:, 1:]
        y[:, -1] = x[:, 0]
        yield x, y
batches = get_batches(encoded, 10, 50)
x, y = next(batches)
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
def build_inputs(batch_size, num_steps):
    ''' Define placeholders for inputs, targets, and dropout
    
        Arguments
        ---------
        batch_size: Batch size, number of sequences per batch
        num_steps: Number of sequence steps in a batch
        
    '''
    # Inputs and targets share the same (batch, steps) shape of int IDs.
    shape = [batch_size, num_steps]
    inputs = tf.placeholder(tf.int32, shape, name='inputs')
    targets = tf.placeholder(tf.int32, shape, name='targets')
    # Scalar keep-probability fed to the dropout layers at run time.
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    return inputs, targets, keep_prob
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
    '''Build the stacked LSTM cell and its all-zero initial state.

    Arguments
    ---------
    keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability
    lstm_size: Size of the hidden layers in the LSTM cells
    num_layers: Number of LSTM layers
    batch_size: Batch size
    '''
    def make_cell():
        # One LSTM layer with dropout applied to its outputs.
        base = tf.contrib.rnn.BasicLSTMCell(lstm_size)
        return tf.contrib.rnn.DropoutWrapper(base, output_keep_prob=keep_prob)

    # Stack num_layers independent cells on top of each other for deep learning.
    cell = tf.contrib.rnn.MultiRNNCell([make_cell() for _ in range(num_layers)])
    initial_state = cell.zero_state(batch_size, tf.float32)

    return cell, initial_state
def build_output(lstm_output, in_size, out_size):
    '''Build the final softmax layer; return probabilities and logits.

    Arguments
    ---------
    lstm_output: List of output tensors from the LSTM layer
    in_size: Size of the input tensor, for example, size of the LSTM cells
    out_size: Size of this softmax layer
    '''
    # Flatten the sequence dimension: concatenate the per-step outputs along
    # the columns, then reshape so there is one row per (sequence, step) pair.
    flat = tf.reshape(tf.concat(lstm_output, axis=1), [-1, in_size])

    # Fully connected layer mapping LSTM outputs onto the character classes.
    with tf.variable_scope('softmax'):
        softmax_w = tf.Variable(tf.truncated_normal([in_size, out_size], stddev=0.1))
        softmax_b = tf.Variable(tf.zeros(out_size))

    # One row of logits per step of every sequence ...
    logits = tf.add(tf.matmul(flat, softmax_w), softmax_b)
    # ... turned into per-character probabilities by the softmax.
    out = tf.nn.softmax(logits, name="predictions")

    return out, logits
def build_loss(logits, targets, lstm_size, num_classes):
    '''Compute the mean softmax cross-entropy between logits and targets.

    Arguments
    ---------
    logits: Logits from final fully connected layer
    targets: Targets for supervised learning
    lstm_size: Number of LSTM hidden units
    num_classes: Number of classes in targets
    '''
    # One-hot encode the integer targets and flatten them so each row lines
    # up with the corresponding row of logits.
    y_reshaped = tf.reshape(tf.one_hot(targets, num_classes), logits.get_shape())

    # Per-example cross-entropy, averaged over every step of every sequence.
    per_example = tf.nn.softmax_cross_entropy_with_logits(labels=y_reshaped, logits=logits)
    return tf.reduce_mean(per_example)
def build_optimizer(loss, learning_rate, grad_clip):
    '''Build the training op: Adam with global-norm gradient clipping.

    Arguments:
    loss: Network loss
    learning_rate: Learning rate for optimizer
    '''
    # Clip the gradients by their global norm to control exploding gradients ...
    trainables = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(loss, trainables), grad_clip)
    # ... then apply the clipped gradients with Adam.
    adam = tf.train.AdamOptimizer(learning_rate)
    return adam.apply_gradients(zip(grads, trainables))
class CharRNN:
    """Character-level RNN: stacked LSTM layers followed by a softmax output.

    Wires the graph-building helpers (build_inputs, build_lstm, build_output,
    build_loss, build_optimizer) into one trainable model, exposing the
    tensors needed for training/sampling as attributes: inputs, targets,
    keep_prob, initial_state, final_state, prediction, logits, loss,
    optimizer.
    """
    def __init__(self, num_classes, batch_size=64, num_steps=50,
                       lstm_size=128, num_layers=2, learning_rate=0.001,
                 grad_clip=5, sampling=False):
        # When we're using this network for sampling later, we'll be passing in
        # one character at a time, so providing an option for that
        if sampling == True:
            batch_size, num_steps = 1, 1
        else:
            batch_size, num_steps = batch_size, num_steps
        # Start from a fresh graph so repeated construction (e.g. re-running
        # a notebook cell) does not accumulate stale ops.
        tf.reset_default_graph()
        # Build the input placeholder tensors
        self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
        # Build the LSTM cell
        cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
        ### Run the data through the RNN layers
        # First, one-hot encode the input tokens
        x_one_hot = tf.one_hot(self.inputs, num_classes)
        # Run each sequence step through the RNN with tf.nn.dynamic_rnn
        outputs, state = tf.nn.dynamic_rnn(cell, inputs=x_one_hot, initial_state=self.initial_state)
        # final_state is fed back as initial_state on the next batch so the
        # hidden state carries across batches during training/sampling.
        self.final_state = state
        # Get softmax predictions and logits
        self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
        # Loss and optimizer (with gradient clipping)
        self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)
        self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
batch_size = 100 # Sequences per batch
num_steps = 100 # Number of sequence steps per batch
lstm_size = 512 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.001 # Learning rate
keep_prob = 0.5 # Dropout keep probability
epochs = 20
# Save every N iterations
save_every_n = 200
# Build the model with the hyperparameters chosen above.
model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
                lstm_size=lstm_size, num_layers=num_layers,
                learning_rate=learning_rate)
# Keep up to 100 checkpoints so we can sample from different training stages.
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Use the line below to load a checkpoint and resume training
    #saver.restore(sess, 'checkpoints/______.ckpt')
    counter = 0  # global training-step counter across all epochs
    for e in range(epochs):
        # Train network
        new_state = sess.run(model.initial_state)
        loss = 0
        for x, y in get_batches(encoded, batch_size, num_steps):
            counter += 1
            start = time.time()
            # Feed the batch plus the carried-over hidden state; dropout on.
            feed = {model.inputs: x,
                    model.targets: y,
                    model.keep_prob: keep_prob,
                    model.initial_state: new_state}
            batch_loss, new_state, _ = sess.run([model.loss,
                                                 model.final_state,
                                                 model.optimizer],
                                                 feed_dict=feed)
            end = time.time()
            print('Epoch: {}/{}... '.format(e+1, epochs),
                  'Training Step: {}... '.format(counter),
                  'Training loss: {:.4f}... '.format(batch_loss),
                  '{:.4f} sec/batch'.format((end-start)))
            # Periodic checkpoint every save_every_n steps.
            if (counter % save_every_n == 0):
                saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
    # Final checkpoint after the last epoch.
    saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
tf.train.get_checkpoint_state('checkpoints')
def pick_top_n(preds, vocab_size, top_n=5):
    """Sample a character index from the top_n most likely predictions.

    Arguments
    ---------
    preds: Array of predicted probabilities of length vocab_size (leading
        singleton dimensions are squeezed away).
    vocab_size: Total number of characters in the vocabulary.
    top_n: Only the top_n highest-probability characters are candidates.

    Returns the sampled character index as an int.
    """
    # np.squeeze returns a view when possible, so copy before zeroing
    # entries; otherwise we would silently clobber the caller's array.
    p = np.squeeze(preds).copy()
    # Zero out everything except the top_n probabilities ...
    p[np.argsort(p)[:-top_n]] = 0
    # ... and renormalize so the survivors form a distribution again.
    p = p / np.sum(p)
    c = np.random.choice(vocab_size, 1, p=p)[0]
    return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
    """Generate n_samples characters from a trained checkpoint.

    Primes the network on `prime`, then repeatedly feeds the last sampled
    character back in.  Relies on the module-level vocab / vocab_to_int /
    int_to_vocab mappings.  NOTE(review): `prime` must be non-empty,
    otherwise `x` is never initialized before the generation loop.
    """
    samples = [c for c in prime]
    # sampling=True builds the graph with batch_size = num_steps = 1.
    model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)
        # Warm up the hidden state on the prime text, one character at a time.
        for c in prime:
            x = np.zeros((1, 1))
            x[0,0] = vocab_to_int[c]
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state], 
                                         feed_dict=feed)

        c = pick_top_n(preds, len(vocab))
        samples.append(int_to_vocab[c])
        # Generation loop: feed each sampled character back in as input.
        for i in range(n_samples):
            x[0,0] = c
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state], 
                                         feed_dict=feed)

            c = pick_top_n(preds, len(vocab))
            samples.append(int_to_vocab[c])
        
    return ''.join(samples)
tf.train.latest_checkpoint('checkpoints')
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i600_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i1200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data (don't change this if you're running the notebook on the cluster)
Step2: Exploring Dates
Step3: Convert into date-time type
Step4: Part 1 - Observations from the data
Step5: Crime per year
Step6: Let's look at residential burglary.
Step7: Normalized over the annual average
Step8: Fitting the regression line
|
<ASSISTANT_TASK:>
Python Code:
### Load libraries
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
help(plt.legend)
%%time
df = pd.read_excel('/home/data/APD/COBRA083016_2015.xlsx', sheetname='Query')
df.shape
for c in df.columns:
print(c)
df[0:5]
df.describe()
df.offense_id.min(), df.offense_id.max()
df.groupby(['UC2 Literal']).offense_id.count()
df[['offense_id', 'occur_date', 'occur_time', 'rpt_date']][1:10]
df['occur_ts'] = pd.to_datetime(df.occur_date+' '+df.occur_time)
#df[['offense_id', 'occur_date', 'occur_time', 'occur_ts', 'rpt_date']][1:10]
df['occur_ts'] = pd.to_datetime(df.occur_date+' '+df.occur_time)
df['occur_month'] = df['occur_ts'].map(lambda x: x.month)
df['occur_woy'] = df.occur_ts.dt.weekofyear
df.describe()
# Offense counts per crime type and month.
resdf = df.groupby(['UC2 Literal', 'occur_month']).offense_id.count()
resdf
resdf['BURGLARY-RESIDENCE'].values  # .as_matrix() is deprecated/removed; .values works on old and new pandas
resdf['BURGLARY-RESIDENCE'].iloc[0]  # iloc is an indexer: use [] not () to read the first element
%matplotlib inline
fig = plt.figure(figsize=(10,6)) # 10inx10in
#plt.plot(resdf['BURGLARY-RESIDENCE'].index, resdf['BURGLARY-RESIDENCE'])
plt.scatter(resdf['BURGLARY-RESIDENCE'].index, resdf['BURGLARY-RESIDENCE'], marker='x')
plt.scatter(resdf['BURGLARY-NONRES'].index, resdf['BURGLARY-NONRES'], marker='o')
plt.ylim(0, 500)
plt.title('BURGLARY-RESIDENCE')
plt.xticks(range(13), ['', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
fig.savefig('BurglaryResidence_over_month.svg')
x = 1
df = pd.read_excel('/home/data/APD/COBRA083016_2015.xlsx', sheetname='Query')
df['occur_ts'] = pd.to_datetime(df.occur_date+' '+df.occur_time)
df['occur_month'] = df['occur_ts'].map(lambda x: x.month)
df['occur_woy'] = df.occur_ts.dt.weekofyear
%matplotlib inline
resdf = df.groupby(['UC2 Literal', 'occur_month']).offense_id.count()
fig = plt.figure(figsize=(10,6))
plt.scatter(resdf['BURGLARY-RESIDENCE'].index, resdf['BURGLARY-RESIDENCE'], marker='x')
plt.ylim(0, 500)
plt.title('BURGLARY-RESIDENCE')
plt.xticks(range(13), ['', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
plt.savefig('quiz3-burglary-residence.png')
''
plt.savefig('quiz3-burglary-residence.png')
## load complete dataset
dff = pd.read_excel('/home/data/APD/COBRA083016.xlsx', sheetname='Query')
dff.shape
for evt in ['occur', 'poss']:
dff['%s_ts'%evt] = pd.to_datetime(dff['%s_date'%evt]+' '+dff['%s_time'%evt])
dff['rpt_ts'] = pd.to_datetime(dff.rpt_date)
', '.join(dff.columns)
dff['occur_year'] = dff.occur_ts.dt.year
dff['occur_month'] = dff.occur_ts.dt.month
dff['occur_dayweek'] = dff.occur_ts.dt.dayofweek
crime_year = dff[dff.occur_year.between(2009, 2015)].groupby(by=['UC2 Literal', 'occur_year']).offense_id.count()
%matplotlib inline
fig = plt.figure(figsize=(40,30))
crime_types = crime_year.index.levels[0]
years = crime_year.index.levels[1]
for c in range(len(crime_types)):
y_max = max(crime_year.loc[crime_types[c]])
plt.subplot(4,3,c+1)
plt.hlines(crime_year.loc[crime_types[c]].iloc[-1]*100/y_max, years[0], years[-1], linestyles="dashed", color="r")
plt.bar(crime_year.loc[crime_types[c]].index, crime_year.loc[crime_types[c]]*100/y_max, label=crime_types[c], alpha=0.5)
##plt.legend()
plt.ylim(0, 100)
plt.xticks(years+0.4, [str(int(y)) for y in years], rotation=0, fontsize=24)
plt.yticks([0,20,40,60,80,100], ['0%','20%','40%','60%','80%','100%'], fontsize=24)
plt.title(crime_types[c], fontsize=30)
None
c = 3
crime_types[c]
crime_year_month = dff[dff.occur_year.between(2009, 2015)].groupby(by=['UC2 Literal', 'occur_year', 'occur_month']).offense_id.count()
c = 3 ## 'BURGLARY-RESIDENCE'
resburglaries = crime_year_month.loc[crime_types[c]]
fig = plt.figure(figsize=(20,10))
for y in years:
plt.plot(resburglaries.loc[y].index, resburglaries.loc[y], label=("%4.0f"%y))
plt.legend()
plt.title("Seasonal Trends - %s"%crime_types[c], fontsize=20)
plt.xticks(range(13), ['', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
plt.xlim(0,13)
None
c = 3 ## 'BURGLARY-RESIDENCE'
fig = plt.figure(figsize=(20,10))
for y in years:
avg = resburglaries.loc[y].mean()
plt.hlines(avg, 1, 13, linestyle='dashed')
plt.plot(resburglaries.loc[y].index, resburglaries.loc[y], label=("%4.0f"%y))
plt.legend()
plt.title("Seasonal Trends - %s (with annuale averages)"%crime_types[c], fontsize=20)
plt.xticks(list(range(1,13)), ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
plt.xlim(0,13)
None
c = 3 ## 'BURGLARY-RESIDENCE'
fig = plt.figure(figsize=(20,10))
for y in years:
avg = resburglaries.loc[y].mean()
std = resburglaries.loc[y].std()
##plt.hlines(avg, 1, 13, linestyle='dashed')
plt.plot(resburglaries.loc[y].index, (resburglaries.loc[y]-avg)/std, label=("%4.0f"%y))
plt.legend()
plt.title("Seasonal Trends - %s (normalized)"%crime_types[c], fontsize=20)
plt.xticks(list(range(1,13)), ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
plt.xlim(0,13)
plt.ylabel("Standard deviations $\sigma_y$")
None
seasonal_adjust = resburglaries.reset_index().groupby(by=['occur_month']).offense_id.agg('mean')
### in case we want to save a DataFrame
#writer = pd.ExcelWriter('myresults.xlsx')
#df.to_excel(writer,'Results')
#writer.save()
resdf
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: RP Extract Library
Step2: Python Libraries
Step3: Additional Libraries
Step4: MP3 Decoder
Step5: Import + Test your Environment
Step6: <a name="processing"><font color="#0404B4">2. Audio Processing</font></a>
Step7: <b>Note about Normalization
Step8: Plot Wave form
Step9: Audio Pre-processing
Step10: Below an example waveform of a mono channel after combining the stereo channels by arithmetic mean
Step11: <a name="extraction"><font color="#0404B4">3. Audio Feature Extraction</font></a>
Step12: Statistical Spectrum Descriptor
Step13: Rhythm Histogram
Step14: Get rough BPM from Rhythm Histogram
Step15: Modulation Frequency Variance Descriptor
Step16: <a name="application"><font color="#0404B4">4. Application Scenarios</font></a>
Step17: Get Track Info
Step18: Get Track URLs
Step19: Download Preview MP3
Step20: Iterate over a List of Soundcloud Tracks
Step21: <a name="soundcloudanalysis"><font color="#0404B4">4.2. Analyzing Songs from Soundcloud</font></a>
Step22: <b>Note
Step23: Further Example
Step24: <a name="similar"><font color="#0404B4">4.3. Finding Similar Sounding Songs</font></a>
Step25: The Soundcloud Demo Dataset
Step26: The SoundcloudDemodatasetHandler abstracts the access to the TU-Wien server. On this server the extracted features are stored as csv-files. The SoundcloudDemodatasetHandler remotely loads the features and returns them by request. The features have been extracted using the method explained in the previous sections.
Step27: Finding rhythmically similar songs
Step28: Finding rhythmically similar songs using Rhythm Histograms
Step29: Our query-song
Step30: Retrieve the feature vector for the query song
Step31: Search the nearest neighbors of the query-feature-vector
Step32: Lookup the corresponding Soundcloud-IDs
Step33: Listen to the results
Step34: Finding rhythmically similar songs using Rhythm Patterns
Step35: Finding songs based on Timbral Similarity
Step36: Compare the Results of Timbral and Rhythmic Similarity
Step37: Using your Own Query Song from the self-extracted Souncloud tracks above
Step38: Visual Player with the Songs most similar to our Own Song
Step39: Add On
|
<ASSISTANT_TASK:>
Python Code:
# to install iPython notebook on your computer, use this in Terminal
sudo pip install "ipython[notebook]"
# in Terminal
git clone https://github.com/tuwien-musicir/rp_extract.git
# in Terminal
sudo pip install numpy scipy matplotlib
# in Terminal
sudo pip install soundcloud urllib unicsv scikit-learn
git clone https://github.com/tuwien-musicir/mir_utils.git
import os
path = '/path/to/ffmpeg/'
os.environ['PATH'] += os.pathsep + path
%pylab inline
import warnings
warnings.filterwarnings('ignore')
%load_ext autoreload
%autoreload 2
# numerical processing and scientific libraries
import numpy as np
# plotting
import matplotlib.pyplot as plt
# reading wav and mp3 files
from audiofile_read import * # included in the rp_extract git package
# Rhythm Pattern Audio Extraction Library
from rp_extract_python import rp_extract
from rp_plot import * # can be skipped if you don't want to do any plots
# misc
from urllib import urlopen
import urllib2
import gzip
import StringIO
# provide/adjust the path to your wav or mp3 file
audiofile = "music/1972-048 Elvis Presley - Burning Love 22khz.mp3"
samplerate, samplewidth, wavedata = audiofile_read(audiofile)
samplerate, samplewidth, wavedata = audiofile_read(audiofile, normalize=False)
wavedata.shape
nsamples = wavedata.shape[0]
nchannels = wavedata.shape[1]
print "Successfully read audio file:", audiofile
print samplerate, "Hz,", samplewidth*8, "bit,", nchannels, "channel(s),", nsamples, "samples"
max_samples_plot = 4 * samplerate # limit number of samples to plot (to 4 sec), to avoid graphical overflow
if nsamples < max_samples_plot:
max_samples_plot = nsamples
plot_waveform(wavedata[0:max_samples_plot], 16, 5);
# combine the stereo channels into one by taking their arithmetic mean
wavedata_mono = np.mean(wavedata, axis=1)
plot_waveform(wavedata_mono[0:max_samples_plot], 16, 3)
plotstft(wavedata_mono, samplerate, binsize=512, ignore=True);
features = rp_extract(wavedata, # the two-channel wave-data of the audio-file
samplerate, # the samplerate of the audio-file
extract_rp = True, # <== extract this feature!
transform_db = True, # apply psycho-accoustic transformation
transform_phon = True, # apply psycho-accoustic transformation
transform_sone = True, # apply psycho-accoustic transformation
fluctuation_strength_weighting=True, # apply psycho-accoustic transformation
skip_leadin_fadeout = 1, # skip lead-in/fade-out. value = number of segments skipped
step_width = 1) #
plotrp(features['rp'])
features = rp_extract(wavedata, # the two-channel wave-data of the audio-file
samplerate, # the samplerate of the audio-file
extract_ssd = True, # <== extract this feature!
transform_db = True, # apply psycho-accoustic transformation
transform_phon = True, # apply psycho-accoustic transformation
transform_sone = True, # apply psycho-accoustic transformation
fluctuation_strength_weighting=True, # apply psycho-accoustic transformation
skip_leadin_fadeout = 1, # skip lead-in/fade-out. value = number of segments skipped
step_width = 1) #
plotssd(features['ssd'])
features = rp_extract(wavedata, # the two-channel wave-data of the audio-file
samplerate, # the samplerate of the audio-file
extract_rh = True, # <== extract this feature!
transform_db = True, # apply psycho-accoustic transformation
transform_phon = True, # apply psycho-accoustic transformation
transform_sone = True, # apply psycho-accoustic transformation
fluctuation_strength_weighting=True, # apply psycho-accoustic transformation
skip_leadin_fadeout = 1, # skip lead-in/fade-out. value = number of segments skipped
step_width = 1) #
plotrh(features['rh'])
maxbin = features['rh'].argmax(axis=0) + 1 # +1 because it starts from 0
mod_freq_res = 1.0 / (2**18/44100.0) # resolution of modulation frequency axis (0.168 Hz) (= 1/(segment_size/samplerate))
#print mod_freq_res * 60 # resolution
bpm = maxbin * mod_freq_res * 60
print bpm
# adapt the fext array to your needs:
fext = ['rp','ssd','rh','mvd'] # sh, tssd, trh
features = rp_extract(wavedata,
samplerate,
extract_rp = ('rp' in fext), # extract Rhythm Patterns features
extract_ssd = ('ssd' in fext), # extract Statistical Spectrum Descriptor
extract_sh = ('sh' in fext), # extract Statistical Histograms
extract_tssd = ('tssd' in fext), # extract temporal Statistical Spectrum Descriptor
extract_rh = ('rh' in fext), # extract Rhythm Histogram features
extract_trh = ('trh' in fext), # extract temporal Rhythm Histogram features
extract_mvd = ('mvd' in fext), # extract Modulation Frequency Variance Descriptor
spectral_masking=True,
transform_db=True,
transform_phon=True,
transform_sone=True,
fluctuation_strength_weighting=True,
skip_leadin_fadeout=1,
step_width=1)
# let's see what we got in our dict
print features.keys()
# list the feature type dimensions
for k in features.keys():
print k, features[k].shape
# START SOUNDCLOUD API
import soundcloud
import urllib # for mp3 download
# To use soundcloud-python, you must first create a Client instance, passing at a minimum the client id you
# obtained when you registered your app:
# If you only need read-only access to public resources, simply provide a client id when creating a Client instance:
my_client_id= 'insert your soundcloud client id here'
client = soundcloud.Client(client_id=my_client_id)
# if there is no error after this, it should have worked
# GET TRACK INFO
#soundcloud_url = 'http://soundcloud.com/forss/flickermood'
soundcloud_url = 'https://soundcloud.com/majorlazer/be-together-feat-wild-belle'
track = client.get('/resolve', url=soundcloud_url)
print "TRACK ID:", track.id
print "Title:", track.title
print "Artist: ", track.user['username']
print "Genre: ", track.genre
print track.bpm, "bpm"
print track.playback_count, "times played"
print track.download_count, "times downloaded"
print "Downloadable?", track.downloadable
# if you want to see all information contained in 'track':
print vars(track)
if hasattr(track, 'download_url'):
print track.download_url
print track.stream_url
stream = client.get('/tracks/%d/streams' % track.id)
#print vars(stream)
print stream.http_mp3_128_url
# set the MP3 download directory
mp3_dir = './music'
mp3_file = mp3_dir + os.sep + "%s.mp3" % track.title
# Download the 128 kbit stream MP3
urllib.urlretrieve (stream.http_mp3_128_url, mp3_file)
print "Downloaded " + mp3_file
# use your own soundcloud urls here
soundcloud_urls = [
'https://soundcloud.com/absencemusik/lana-del-rey-born-to-die-absence-remix',
'https://soundcloud.com/princefoxmusic/raindrops-feat-kerli-prince-fox-remix',
'https://soundcloud.com/octobersveryown/remyboyz-my-way-rmx-ft-drake'
]
# Download each Soundcloud URL and record only the tracks for which an MP3
# could actually be fetched, so mp3_files and own_track_ids stay in sync
# with the files on disk (the analysis loops below index them in parallel).
mp3_dir = './music'
mp3_files = []
own_track_ids = []
for url in soundcloud_urls:
    print(url)
    track = client.get('/resolve', url=url)
    mp3_file = mp3_dir + os.sep + "%s.mp3" % track.title
    stream = client.get('/tracks/%d/streams' % track.id)
    # Prefer the full 128 kbit stream; fall back to the 1:30 preview.
    if hasattr(stream, 'http_mp3_128_url'):
        mp3_url = stream.http_mp3_128_url
    elif hasattr(stream, 'preview_mp3_128_url'):
        mp3_url = stream.preview_mp3_128_url
    else:
        print("No MP3 can be downloaded for this song.")
        mp3_url = None  # in this case we can't get an mp3
    if mp3_url is not None:
        urllib.urlretrieve(mp3_url, mp3_file)  # Download the 128 kbit stream MP3
        # Bug fix: record the track only after a successful download; the
        # original appended before this check, leaving dangling entries.
        mp3_files.append(mp3_file)
        own_track_ids.append(track.id)
        print("Downloaded " + mp3_file)
# show list of mp3 files we got:
# print mp3_files
# mp3_files is the list of downloaded Soundcloud files as stored above (mp3_files.append())
# all_features will be a list of dict entries for all files
all_features = []
for mp3 in mp3_files:
# Read the Audio file
samplerate, samplewidth, wavedata = audiofile_read(mp3)
print "Successfully read audio file:", mp3
nsamples = wavedata.shape[0]
nchannels = wavedata.shape[1]
print samplerate, "Hz,", samplewidth*8, "bit,", nchannels, "channel(s),", nsamples, "samples"
# Extract the Audio Features
# (adapt the fext array to your needs)
fext = ['rp','ssd','rh','mvd'] # sh, tssd, trh
features = rp_extract(wavedata,
samplerate,
extract_rp = ('rp' in fext), # extract Rhythm Patterns features
extract_ssd = ('ssd' in fext), # extract Statistical Spectrum Descriptor
extract_sh = ('sh' in fext), # extract Statistical Histograms
extract_tssd = ('tssd' in fext), # extract temporal Statistical Spectrum Descriptor
extract_rh = ('rh' in fext), # extract Rhythm Histogram features
extract_trh = ('trh' in fext), # extract temporal Rhythm Histogram features
extract_mvd = ('mvd' in fext), # extract Modulation Frequency Variance Descriptor
)
all_features.append(features)
print "Finished analyzing", len(mp3_files), "files."
# iterates over all featuers (files) we extracted
for feat in all_features:
plotrp(feat['rp'])
plotrh(feat['rh'])
maxbin = feat['rh'].argmax(axis=0) + 1 # +1 because it starts from 0
bpm = maxbin * mod_freq_res * 60
print "roughly", round(bpm), "bpm"
# currently this does not work
genre = 'Dancehall'
curr_offset = 0 # Note: the API has a limit of 50 items per response, so to get more you have to query multiple times with an offset.
tracks = client.get('/tracks', genres=genre, offset=curr_offset)
print "Retrieved", len(tracks), "track objects data"
# original Soundcloud example, searching for genre and bpm
# currently this does not work
tracks = client.get('/tracks', genres='punk', bpm={'from': 120})
# IMPORTING mir_utils (installed from git above in parallel to rp_extract (otherwise ajust path))
import sys
sys.path.append("../mir_utils")
from demo.NotebookUtils import *
from demo.PlottingUtils import *
from demo.Soundcloud_Demo_Dataset import SoundcloudDemodatasetHandler
# IMPORTS for NearestNeighbor Search
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
# show the data set as Souncloud playlist
iframe = '<iframe width="100%" height="450" scrolling="no" frameborder="no" src="https://w.soundcloud.com/player/?url=https%3A//api.soundcloud.com/playlists/106852365&auto_play=false&hide_related=false&show_comments=true&show_user=true&show_reposts=false&visual=false"></iframe>'
HTML(iframe)
# first argument is local file path for downloaded MP3s and local metadata (if present, otherwise None)
scds = SoundcloudDemodatasetHandler(None, lazy=False)
# Initialize the similarity search object
sim_song_search = NearestNeighbors(n_neighbors = 6, metric='euclidean')
# set feature type
feature_set = 'rh'
# get features from Soundcloud demo set
demoset_features = scds.features[feature_set]["data"]
# Normalize the extracted features
scaled_feature_space = StandardScaler().fit_transform(demoset_features)
# Fit the Nearest-Neighbor search object to the extracted features
sim_song_search.fit(scaled_feature_space)
query_track_soundcloud_id = 68687842 # Mr. Saxobeat
HTML(scds.getPlayerHTMLForID(query_track_soundcloud_id))
query_track_feature_vector = scaled_feature_space[scds.features[feature_set]["ids"] == query_track_soundcloud_id]
(distances, similar_songs) = sim_song_search.kneighbors(query_track_feature_vector, return_distance=True)
print distances
print similar_songs
# For now we use only the song indices without distances
similar_songs = sim_song_search.kneighbors(query_track_feature_vector, return_distance=False)[0]
# because we are searching in the entire collection, the top-most result is the query song itself. Thus, we can skip it.
similar_songs = similar_songs[1:]
similar_soundcloud_ids = scds.features[feature_set]["ids"][similar_songs]
print similar_soundcloud_ids
SoundcloudTracklist(similar_soundcloud_ids, width=90, height=120, visual=False)
def search_similar_songs_by_id(query_song_id, feature_set, skip_query=True):
    """Return Soundcloud IDs of the nearest neighbours of query_song_id.

    Uses the module-level `scds` dataset handler and `sim_song_search`
    NearestNeighbors object; `feature_set` selects the feature type.
    With skip_query=True the query song itself (always the closest
    match) is dropped from the result.
    """
    feats = scds.features[feature_set]
    # Normalize the feature space and (re)fit the neighbour index on it.
    scaled = StandardScaler().fit_transform(feats["data"])
    sim_song_search.fit(scaled)
    # Locate the query song's feature vector via its Soundcloud ID.
    query_vector = scaled[feats["ids"] == query_song_id]
    neighbours = sim_song_search.kneighbors(query_vector, return_distance=False)[0]
    if skip_query:
        neighbours = neighbours[1:]
    # Map neighbour indices back to Soundcloud track IDs.
    return feats["ids"][neighbours]
similar_soundcloud_ids = search_similar_songs_by_id(query_track_soundcloud_id,
feature_set='rp')
SoundcloudTracklist(similar_soundcloud_ids, width=90, height=120, visual=False)
similar_soundcloud_ids = search_similar_songs_by_id(query_track_soundcloud_id,
feature_set='ssd')
SoundcloudTracklist(similar_soundcloud_ids, width=90, height=120, visual=False)
track_id = 68687842 # 40439758
results_track_1 = search_similar_songs_by_id(track_id, feature_set='ssd', skip_query=False)
results_track_2 = search_similar_songs_by_id(track_id, feature_set='rh', skip_query=False)
compareSimilarityResults([results_track_1, results_track_2],
width=100, height=120, visual=False,
columns=['Statistical Spectrum Descriptors', 'Rhythm Histograms'])
# check which files we got
mp3_files
# select from the list above the number of the song you want to use as a query (counting from 1)
song_id = 3 # count from 1
# select the feature vector type
feat_type = 'rp' # 'rh' or 'ssd' or 'rp'
# from the all_features data structure, we get the desired feature vector belonging to that song
query_feature_vector = all_features[song_id - 1][feat_type]
# get all the feature vectors of desired feature type from the Soundcloud demo set
demo_features = scds.features[feat_type]["data"]
# Initialize Neighbour Search space with demo set features
sim_song_search.fit(demo_features)
# use our own query_feature_vector for search in the demo set
(distances, similar_songs) = sim_song_search.kneighbors(query_feature_vector, return_distance=True)
print distances
print similar_songs
# now we got the song indices for similar songs in the demo set
similar_songs = similar_songs[0]
similar_songs
# and we get the according Soundcloud Track IDs
similar_soundcloud_ids = scds.features[feat_type]["ids"][similar_songs]
similar_soundcloud_ids
# we add our own Track ID at the beginning to show the seed song below:
my_track_id = own_track_ids[song_id - 1]
print my_track_id
result = np.insert(similar_soundcloud_ids,0,my_track_id)
print "Feature Type:", feat_type
SoundcloudTracklist(result, width=90, height=120, visual=False)
def search_similar_songs_with_combined_sets(scds, query_song_id, feature_sets, skip_query=True, n_neighbors=6):
    """Nearest-neighbour search on the concatenation of several feature sets.

    Combines the given feature sets (e.g. ['ssd', 'rh']) into one vector per
    track and returns the Soundcloud IDs of the n_neighbors closest tracks.
    With skip_query=True the query track itself is dropped from the result.
    """
    features = scds.getCombinedFeaturesets(feature_sets)
    sim_song_search = NearestNeighbors(n_neighbors = n_neighbors, metric='l2')
    # Normalize the combined feature space before measuring distances.
    scaled_feature_space = StandardScaler().fit_transform(features)
    # Fit the neighbour index on the scaled features.
    sim_song_search.fit(scaled_feature_space);
    # Locate the query track's vector via its Soundcloud ID.
    query_track_feature_vector = scaled_feature_space[scds.getFeatureIndexByID(query_song_id, feature_sets[0])]
    # Indices of the nearest tracks in the feature matrix.
    similar_songs = sim_song_search.kneighbors(query_track_feature_vector, return_distance=False)[0]
    if skip_query:
        similar_songs = similar_songs[1:]
    # Map the matrix indices back to Soundcloud track IDs.
    similar_soundcloud_ids = scds.getIdsByIndex(similar_songs, feature_sets[0])
    return similar_soundcloud_ids
feature_sets = ['ssd','rh']
compareSimilarityResults([search_similar_songs_with_combined_sets(scds, 68687842, feature_sets=feature_sets, n_neighbors=5),
search_similar_songs_with_combined_sets(scds, 40439758, feature_sets=feature_sets, n_neighbors=5)],
width=100, height=120, visual=False,
columns=[scds.getNameByID(68687842),
scds.getNameByID(40439758)])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Automatic differentiation and gradient tape
Step2: Gradient tapes
Step3: You can also request gradients of the output with respect to intermediate values computed during a "recorded" tf.GradientTape context.
Step4: By default, the resources held by a GradientTape are released as soon as GradientTape.gradient() method is called. To compute multiple gradients over the same computation, create a persistent gradient tape. This allows multiple calls to the gradient() method as resources are released when the tape object is garbage collected. For example
Step5: Recording control flow
Step6: Higher-order gradients
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow.compat.v1 as tf
x = tf.ones((2, 2))
with tf.GradientTape() as t:
t.watch(x)
y = tf.reduce_sum(x)
z = tf.multiply(y, y)
# Derivative of z with respect to the original input tensor x
dz_dx = t.gradient(z, x)
for i in [0, 1]:
for j in [0, 1]:
assert dz_dx[i][j].numpy() == 8.0
x = tf.ones((2, 2))
with tf.GradientTape() as t:
t.watch(x)
y = tf.reduce_sum(x)
z = tf.multiply(y, y)
# Use the tape to compute the derivative of z with respect to the
# intermediate value y.
dz_dy = t.gradient(z, y)
assert dz_dy.numpy() == 8.0
x = tf.constant(3.0)
with tf.GradientTape(persistent=True) as t:
t.watch(x)
y = x * x
z = y * y
dz_dx = t.gradient(z, x) # 108.0 (4*x^3 at x = 3)
dy_dx = t.gradient(y, x) # 6.0
del t # Drop the reference to the tape
def f(x, y):
    """Return x raised to the number of indices i in range(y) with 1 < i < 5.

    Uses plain Python control flow (loop + branch) inside the computation
    to demonstrate that tf.GradientTape records dynamic, data-dependent
    graphs.
    """
    output = 1.0
    for i in range(y):
        # only iterations i = 2, 3, 4 multiply in a factor of x
        if i > 1 and i < 5:
            output = tf.multiply(output, x)
    return output
def grad(x, y):
    """Gradient of f(x, y) with respect to x, computed with a gradient tape."""
    with tf.GradientTape() as t:
        t.watch(x)  # x is a constant tensor, so it must be watched explicitly
        out = f(x, y)
    return t.gradient(out, x)
x = tf.convert_to_tensor(2.0)
assert grad(x, 6).numpy() == 12.0
assert grad(x, 5).numpy() == 12.0
assert grad(x, 4).numpy() == 4.0
x = tf.Variable(1.0) # Create a Tensorflow variable initialized to 1.0
with tf.GradientTape() as t:
with tf.GradientTape() as t2:
y = x * x * x
# Compute the gradient inside the 't' context manager
# which means the gradient computation is differentiable as well.
dy_dx = t2.gradient(y, x)
d2y_dx2 = t.gradient(dy_dx, x)
assert dy_dx.numpy() == 3.0
assert d2y_dx2.numpy() == 6.0
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Mi configuración es la siguiente
Step2: Primero necesitamos poder conectar con la base de datos. Esto es de lo poco que diferirá con respecto a otros RDBMSs.
Step3: Ahora que ya tenemos una conexión a la base de datos Pandas se puede encargar del trabajo sucio de 'hablar' con la base de datos y ayudarnos a interactuar directamente con los datos de la forma habitual y potente de Pandas.
Step4: Vamos a crear una segunda tabla para añadir un poco de complejidad a las consultas que hagamos posteriormente a la base de datos. Esta tabla contendrá información de los sensores usados para las medidas.
Step5: Consultando la base de datos
Step6: En la consulta anterior hemos usados varias palabras clave
Step7: Ahora queremos los datos de precipitación de Junio junto con su fecha. Fijaos que el valor superior del rango no es inclusivo (BETWEEN '2014-06-01' AND '2014-07-01' nos da el dato hasta antes de fecha = '2014-07-01')
Step8: Ahora quiero los datos de temperatura de los sensores con una precisión superior a 0.5 (el sensor marcaT1 es el único que me da precisión superior a 0.5)
Step10: Si os fijáis, en la consulta anterior he usado datos.sensorT (tabla.columna). Esto solo será necesario si estamos trabajando con varias tablas y una columna se puede llamar igual en varias tablas. De esta forma no hay posibilidad de equivocarse ya que usamos 'nombres y apellidos'.
Step12: Podemos decirle que nos pase solo una serie de valores. Por ejemplo, solo quiero los tres valores más altos de precipitación de diciembre
Step13: En la consulta anterior le hemos pedido que nos ordenase por los valores de precipitación de forma descendente (es decir, por el final) y le hemos pedido que nos limitase la búsqueda a tres valores, los tres valores más altos.
|
<ASSISTANT_TASK:>
Python Code:
import sqlite3
import datetime as dt
import pandas as pd
import numpy as np
%load_ext version_information
%version_information pandas, numpy
conn = sqlite3.connect('pybodb.sqlite')
# ejemplo con PostgreSQL usando psycopg2
# import psycopg2
# conn = psycopg2.connect(database='ejemplodb', user='kiko')
# ejemplo con MS ACCESS usando pyodbc (sí, en el trabajo tengo que usar estas cosas)
# import pyodbc
# conn = pyodbc.connect("Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=ejemplodb;")
# ...
# fechas para cada día del 2014
fechas = pd.date_range(dt.datetime(2014, 1, 1), dt.datetime(2014, 12, 31))
# Temperatura media diaria durante 2014 en algún lugar del hemisferio Norte
tmed = (np.random.randint(-5, 5, size = 365) +
20 * np.cos(np.arange(0 - 180, 365 - 180) * 2 * np.pi / 365)
+ 10)
# Precipitación acumulada en 24h
prec = (20 * np.abs(np.random.randn(365) *
np.cos(np.arange(0, 365) * 2 * np.pi / 365)))
# Sensor que midió la temperatura
marcaT = np.random.choice(['marcaT1', 'marcaT2', 'marcaT3'], size = 365)
# Sensor midió la precipitación
marcaP = np.random.choice(['marcaP1', 'marcaP2'], size = 365)
# Creamos el dataframe y lo guardamos en una tabla llamada 'datos'
df = pd.DataFrame(
np.array([fechas.values, tmed, prec, marcaT, marcaP]).T,
columns = ['fecha', 'tmedia','precipitacion','sensorT','sensorP'])
df['fecha'] = pd.to_datetime(df['fecha'])
df.to_sql('datos', con = conn, dtype = {'time': 'TIMESTAMP'})
# fechas para cada día del 2014
sensores = ['marcaT1', 'marcaT2', 'marcaT3',
'marcaP1', 'marcaP2']
# Precisión de los sensores
precision = [0.1, 0.5, 1,
2, 5]
df = pd.DataFrame({'sensores': sensores, 'precision': precision})
df.to_sql('sensores', con = conn)
# Esto es específico para sqlite
print(pd.read_sql("SELECT name FROM sqlite_master WHERE type='table';", conn))
# Para otras BBDD puedes buscar en internet :-)
df = pd.read_sql("SELECT * FROM sensores;", conn)
print(df)
df = pd.read_sql("SELECT fecha, precipitacion FROM datos WHERE fecha BETWEEN '2014-06-01' AND '2014-07-01';", conn)
print(df)
df = pd.read_sql("SELECT fecha, tmedia FROM datos WHERE datos.sensorT = 'marcaT1';", conn)
print(df)
# La misma consulta de antes pero sin saber la precisión de cada uno de los sensores a priori
q =
SELECT
fecha, tmedia
FROM
datos
WHERE
datos.sensorT =
(SELECT
sensores
FROM
sensores
WHERE
precision < 0.5);
df = pd.read_sql(q, conn)
print(df)
q =
SELECT
fecha, precipitacion
FROM
datos
WHERE
fecha > '2014-11-30'
ORDER BY
precipitacion DESC
LIMIT
3
df = pd.read_sql(q, conn)
print(df)
conn.close() # :-)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Train Test Split
Step2: Preprocesamiento de la información
Step3: Entrenamiento del modelo
Step4: Predicciones y Evaluación
|
<ASSISTANT_TASK:>
Python Code:
# librerias
import pandas as pd
data = pd.read_csv('../../data/wine.csv', names = ["Cultivator", "Alchol", "Malic_Acid", "Ash", "Alcalinity_of_Ash", "Magnesium", "Total_phenols", "Falvanoids", "Nonflavanoid_phenols", "Proanthocyanins", "Color_intensity", "Hue", "OD280", "Proline"])
# verificar la integridad de la información
data.head()
# realizar una descriptiva gráfica del dataframe
data.describe().transpose()
# estructura de los datos
data.shape
# preparar los datos para su procesamiento
X = data.drop('Cultivator', axis=1)
y = data['Cultivator']
# importar función
from sklearn.model_selection import train_test_split
# generar la información para su procesamiento
X_train, X_test, y_train, y_test = train_test_split(X,y)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# ajustar la información de entrenamiento
scaler.fit(X_train)
# aplicar la transformación a toda la información
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# importar librería
from sklearn.neural_network import MLPClassifier
# crear la instancía que nos permitira definir el número de capaz y el máximo número de iteraciones
mlp = MLPClassifier(hidden_layer_sizes=(15,15,15),max_iter=1000)
# ahora que tenemos el modelo generardo, ya podemos ajustar la información al modelo
mlp.fit(X_train,y_train)
predictions = mlp.predict(X_test)
# importar los métodos de reporte
from sklearn.metrics import classification_report,confusion_matrix
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Downloading the Data
Step2: From the utils_preproc package we have written for you,
Step3: Sentence Integerizing
Step4: The outputted tokenizer can be used to get back the actual works
Step5: Creating the tf.data.Dataset
Step6: load_and_integerize
Step7: Train and eval splits
Step8: Now let's load and integerize the sentence paris and store the tokenizer for the source and the target language into the int_lang and targ_lang variable respectively
Step9: Let us store the maximal sentence length of both languages into two variables
Step10: We are now using scikit-learn train_test_split to create our splits
Step11: Let's make sure the number of example in each split looks good
Step12: The utils_preproc.int2word function allows you to transform the integerized sentences back into words. Note that the <start> token is always encoded as 1, while the <end> token is always encoded as 0
Step13: Create tf.data dataset for train and eval
Step14: Let's now create the actual train and eval dataset using the function above
Step15: Training the RNN encoder-decoder model
Step16: Let's implement the encoder network with Keras functional API. It will
Step17: We now implement the decoder network, which is very similar to the encoder network.
Step18: The last part of the encoder-decoder architecture is a softmax Dense layer that will create the next word probability vector or next word predictions from the decoder_output
Step19: To be able to train the encoder-decoder network defined above, we now need to create a trainable Keras Model by specifying which are the inputs and the outputs of our problem. They should correspond exactly to what the type of input/output in our train and eval tf.data.Dataset since that's what will be fed to the inputs and outputs we declare while instantiating the Keras Model.
Step20: Let's now train the model!
Step21: Implementing the translation (or decoding) function
Step23: Now that we have a separate encoder and a separate decoder, let's implement a translation function, to which we will give the generic name of decode_sequences (to stress that this procedure is general to all seq2seq problems).
Step24: Now we're ready to predict!
Step25: Checkpoint Model
Step26: Evaluation Metric (BLEU)
Step27: Let's now average the bleu_1 and bleu_4 scores for all the sentence pairs in the eval set. The next cell takes some time to run, the bulk of which is decoding the 6000 sentences in the validation set. Please wait unitl completes.
|
<ASSISTANT_TASK:>
Python Code:
pip freeze | grep nltk || pip install nltk
import os
import pickle
import sys
import nltk
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.layers import (
Dense,
Embedding,
GRU,
Input,
)
from tensorflow.keras.models import (
load_model,
Model,
)
import utils_preproc
print(tf.__version__)
SEED = 0
MODEL_PATH = 'translate_models/baseline'
DATA_URL = 'http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip'
LOAD_CHECKPOINT = False
tf.random.set_seed(SEED)
path_to_zip = tf.keras.utils.get_file(
'spa-eng.zip', origin=DATA_URL, extract=True)
path_to_file = os.path.join(
os.path.dirname(path_to_zip),
"spa-eng/spa.txt"
)
print("Translation data stored at:", path_to_file)
data = pd.read_csv(
path_to_file, sep='\t', header=None, names=['english', 'spanish'])
data.sample(3)
raw = [
"No estamos comiendo.",
"Está llegando el invierno.",
"El invierno se acerca.",
"Tom no comio nada.",
"Su pierna mala le impidió ganar la carrera.",
"Su respuesta es erronea.",
"¿Qué tal si damos un paseo después del almuerzo?"
]
processed = [utils_preproc.preprocess_sentence(s) for s in raw]
processed
integerized, tokenizer = utils_preproc.tokenize(processed)
integerized
tokenizer.sequences_to_texts(integerized)
def load_and_preprocess(path, num_examples):
    """Load up to *num_examples* tab-separated sentence pairs from *path*.

    Each line of the file is expected to hold a target-language sentence
    and a source-language sentence separated by a tab.  Every sentence is
    run through ``utils_preproc.preprocess_sentence``.

    Args:
        path: path to the tab-separated corpus file.
        num_examples: number of leading lines to load (None for all).

    Returns:
        A pair of tuples ``(target_sentences, input_sentences)``.
    """
    # BUG FIX: the original opened the module-level global `path_to_file`,
    # silently ignoring the `path` argument passed by the caller.
    with open(path, 'r') as fp:
        lines = fp.read().strip().split('\n')

    sentence_pairs = [
        [utils_preproc.preprocess_sentence(sent) for sent in line.split('\t')]
        for line in lines[:num_examples]
    ]
    return zip(*sentence_pairs)
en, sp = load_and_preprocess(path_to_file, num_examples=10)
print(en[-1])
print(sp[-1])
def load_and_integerize(path, num_examples=None):
    """Load sentence pairs from *path* and integerize both languages.

    Args:
        path: path to the tab-separated corpus file.
        num_examples: number of sentence pairs to load (None for all).

    Returns:
        ``(input_tensor, target_tensor, inp_lang_tokenizer,
        targ_lang_tokenizer)`` — padded integer sequence arrays plus the
        tokenizers that map words <-> integers for each language.
    """
    targ_lang, inp_lang = load_and_preprocess(path, num_examples)
    # TODO 1b
    input_tensor, inp_lang_tokenizer = utils_preproc.tokenize(inp_lang)
    target_tensor, targ_lang_tokenizer = utils_preproc.tokenize(targ_lang)
    return input_tensor, target_tensor, inp_lang_tokenizer, targ_lang_tokenizer
TEST_PROP = 0.2
NUM_EXAMPLES = 30000
input_tensor, target_tensor, inp_lang, targ_lang = load_and_integerize(
path_to_file, NUM_EXAMPLES)
max_length_targ = target_tensor.shape[1]
max_length_inp = input_tensor.shape[1]
splits = train_test_split(
input_tensor, target_tensor, test_size=TEST_PROP, random_state=SEED)
input_tensor_train = splits[0]
input_tensor_val = splits[1]
target_tensor_train = splits[2]
target_tensor_val = splits[3]
(len(input_tensor_train), len(target_tensor_train),
len(input_tensor_val), len(target_tensor_val))
print("Input Language; int to word mapping")
print(input_tensor_train[0])
print(utils_preproc.int2word(inp_lang, input_tensor_train[0]), '\n')
print("Target Language; int to word mapping")
print(target_tensor_train[0])
print(utils_preproc.int2word(targ_lang, target_tensor_train[0]))
def create_dataset(encoder_input, decoder_input):
    """Build a tf.data.Dataset of ((encoder_in, decoder_in), target) pairs.

    The target is the decoder input shifted left by one timestep
    (teacher forcing): at position t the target holds the token the
    decoder should predict after having seen decoder_input[:t+1].
    """
    # TODO 1c
    # shift ahead by 1 along the time axis (axis 1); tf.roll wraps around
    target = tf.roll(decoder_input, -1, 1)
    # replace the wrapped-around last column with 0s (the padding/<end> id)
    zeros = tf.zeros([target.shape[0], 1], dtype=tf.int32)
    target = tf.concat((target[:, :-1], zeros), axis=-1)
    dataset = tf.data.Dataset.from_tensor_slices(
        ((encoder_input, decoder_input), target))
    return dataset
BUFFER_SIZE = len(input_tensor_train)
BATCH_SIZE = 64
train_dataset = create_dataset(
input_tensor_train, target_tensor_train).shuffle(
BUFFER_SIZE).repeat().batch(BATCH_SIZE, drop_remainder=True)
eval_dataset = create_dataset(
input_tensor_val, target_tensor_val).batch(
BATCH_SIZE, drop_remainder=True)
EMBEDDING_DIM = 256
HIDDEN_UNITS = 1024
INPUT_VOCAB_SIZE = len(inp_lang.word_index) + 1
TARGET_VOCAB_SIZE = len(targ_lang.word_index) + 1
encoder_inputs = Input(shape=(None,), name="encoder_input")
# TODO 2a
encoder_inputs_embedded = Embedding(
input_dim=INPUT_VOCAB_SIZE,
output_dim=EMBEDDING_DIM,
input_length=max_length_inp)(encoder_inputs)
encoder_rnn = GRU(
units=HIDDEN_UNITS,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
encoder_outputs, encoder_state = encoder_rnn(encoder_inputs_embedded)
decoder_inputs = Input(shape=(None,), name="decoder_input")
# TODO 2b
decoder_inputs_embedded = Embedding(
input_dim=TARGET_VOCAB_SIZE,
output_dim=EMBEDDING_DIM,
input_length=max_length_targ)(decoder_inputs)
decoder_rnn = GRU(
units=HIDDEN_UNITS,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
decoder_outputs, decoder_state = decoder_rnn(
decoder_inputs_embedded, initial_state=encoder_state)
decoder_dense = Dense(TARGET_VOCAB_SIZE, activation='softmax')
predictions = decoder_dense(decoder_outputs)
# TODO 2c
model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=predictions)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.summary()
STEPS_PER_EPOCH = len(input_tensor_train)//BATCH_SIZE
EPOCHS = 1
history = model.fit(
train_dataset,
steps_per_epoch=STEPS_PER_EPOCH,
validation_data=eval_dataset,
epochs=EPOCHS
)
if LOAD_CHECKPOINT:
encoder_model = load_model(os.path.join(MODEL_PATH, 'encoder_model.h5'))
decoder_model = load_model(os.path.join(MODEL_PATH, 'decoder_model.h5'))
else:
# TODO 3a
encoder_model = Model(inputs=encoder_inputs, outputs=encoder_state)
decoder_state_input = Input(shape=(HIDDEN_UNITS,), name="decoder_state_input")
# Reuses weights from the decoder_rnn layer
decoder_outputs, decoder_state = decoder_rnn(
decoder_inputs_embedded, initial_state=decoder_state_input)
# Reuses weights from the decoder_dense layer
predictions = decoder_dense(decoder_outputs)
decoder_model = Model(
inputs=[decoder_inputs, decoder_state_input],
outputs=[predictions, decoder_state]
)
def decode_sequences(input_seqs, output_tokenizer, max_decode_length=50):
    """Greedily decode a batch of integerized source sentences.

    Arguments:
        input_seqs: int tensor of shape (BATCH_SIZE, SEQ_LEN)
        output_tokenizer: Tokenizer used to convert from int to words
        max_decode_length: number of decoding steps to run; decoding
            always runs the full length (no early stop on <end>).

    Returns:
        A list with one entry per batch element, each a list of
        decoded word strings.
    """
    # Encode the input once as state vectors; the encoder's final state
    # seeds the decoder.
    states_value = encoder_model.predict(input_seqs)

    # Populate the first character of target sequence with the start
    # character (integer id 1 for every sequence in the batch).
    batch_size = input_seqs.shape[0]
    target_seq = tf.ones([batch_size, 1])

    decoded_sentences = [[] for _ in range(batch_size)]

    # TODO 4: Sampling loop — feed each predicted token back in as the
    # next decoder input (greedy search, no beam).
    for i in range(max_decode_length):
        output_tokens, decoder_state = decoder_model.predict(
            [target_seq, states_value])

        # Sample a token: argmax over the vocabulary at the last (and
        # only) timestep of the decoder output.
        sampled_token_index = np.argmax(output_tokens[:, -1, :], axis=-1)
        tokens = utils_preproc.int2word(output_tokenizer, sampled_token_index)
        for j in range(batch_size):
            decoded_sentences[j].append(tokens[j])

        # Update the target sequence (of length 1) with the sampled token.
        target_seq = tf.expand_dims(tf.constant(sampled_token_index), axis=-1)

        # Update states so the decoder continues from where it left off.
        states_value = decoder_state

    return decoded_sentences
sentences = [
"No estamos comiendo.",
"Está llegando el invierno.",
"El invierno se acerca.",
"Tom no comio nada.",
"Su pierna mala le impidió ganar la carrera.",
"Su respuesta es erronea.",
"¿Qué tal si damos un paseo después del almuerzo?"
]
reference_translations = [
"We're not eating.",
"Winter is coming.",
"Winter is coming.",
"Tom ate nothing.",
"His bad leg prevented him from winning the race.",
"Your answer is wrong.",
"How about going for a walk after lunch?"
]
machine_translations = decode_sequences(
utils_preproc.preprocess(sentences, inp_lang),
targ_lang,
max_length_targ
)
for i in range(len(sentences)):
print('-')
print('INPUT:')
print(sentences[i])
print('REFERENCE TRANSLATION:')
print(reference_translations[i])
print('MACHINE TRANSLATION:')
print(machine_translations[i])
if not LOAD_CHECKPOINT:
os.makedirs(MODEL_PATH, exist_ok=True)
# TODO 3b
model.save(os.path.join(MODEL_PATH, 'model.h5'))
encoder_model.save(os.path.join(MODEL_PATH, 'encoder_model.h5'))
decoder_model.save(os.path.join(MODEL_PATH, 'decoder_model.h5'))
with open(os.path.join(MODEL_PATH, 'encoder_tokenizer.pkl'), 'wb') as fp:
pickle.dump(inp_lang, fp)
with open(os.path.join(MODEL_PATH, 'decoder_tokenizer.pkl'), 'wb') as fp:
pickle.dump(targ_lang, fp)
def bleu_1(reference, candidate):
    """Unigram (BLEU-1) score between reference and candidate token lists.

    Padding tokens (empty strings) are stripped from both sequences
    before scoring; smoothing method 1 avoids zero scores.
    """
    ref_tokens = [tok for tok in reference if tok != '']
    cand_tokens = [tok for tok in candidate if tok != '']
    smoother = nltk.translate.bleu_score.SmoothingFunction().method1
    return nltk.translate.bleu_score.sentence_bleu(
        ref_tokens, cand_tokens, (1,), smoother)
def bleu_4(reference, candidate):
    """BLEU-4 score (uniform 1- to 4-gram weights) between token lists.

    Padding tokens (empty strings) are stripped from both sequences
    before scoring; smoothing method 1 avoids zero scores.
    """
    ref_tokens = [tok for tok in reference if tok != '']
    cand_tokens = [tok for tok in candidate if tok != '']
    smoother = nltk.translate.bleu_score.SmoothingFunction().method1
    return nltk.translate.bleu_score.sentence_bleu(
        ref_tokens, cand_tokens, (.25, .25, .25, .25), smoother)
%%time
num_examples = len(input_tensor_val)
bleu_1_total = 0
bleu_4_total = 0
for idx in range(num_examples):
# TODO 5
reference_sentence = utils_preproc.int2word(
targ_lang, target_tensor_val[idx][1:])
decoded_sentence = decode_sequences(
input_tensor_val[idx:idx+1], targ_lang, max_length_targ)[0]
bleu_1_total += bleu_1(reference_sentence, decoded_sentence)
bleu_4_total += bleu_4(reference_sentence, decoded_sentence)
print('BLEU 1: {}'.format(bleu_1_total/num_examples))
print('BLEU 4: {}'.format(bleu_4_total/num_examples))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We're going to work with the qiime-default-reference so we have easy access to some sequences. For reasons we'll look at below, we're going to load the unaligned reference sequences (which are 97% OTUs) and the aligned reference sequences (which are 85% OTUs). If you want to adapt this recipe to train and test a classifier on other files, just set the variable names below to the file paths that you'd like to use for training.
Step2: Several recent studies of amplicon taxonomic assignment methods (Mizrahi-Man et al. 2013, Werner et al. 2012) have suggested that training Naive Bayes taxonomic classifiers against only the region of a sequence that was amplified, rather than a full length sequence, will give better taxonomic assignment results. So, lets start by slicing our reference sequences by finding some commonly used 16S primers so we train only on the fragment of the 16S that we would amplify in an amplicon survey.
Step4: The typical way to approach the problem of finding the boundaries of a short sequence in a longer sequence would be to use pairwise alignment. But, we're going to try a different approach here since pairwise alignment is inherently slow (it scales quadratically). Because these are sequencing primers, they're designed to be unique (so there shouldn't be multiple matches of a primer to a sequence), and they're designed to match as many sequences as possible. So let's try using regular expressions to match our sequencing primers in the reference database. Regular expression matching scales linearly, so it is much faster to apply to many sequences.
Step5: We can then apply this to define a regular expression that will match our forward primer, the following sequence, and then the reverse primer. We can use the resulting matches then to find the region of our sequences that is bound by our forward and reverse primer.
Step6: Next, let's apply this to all of our unaligned sequence and find out how many reference sequences our pattern matches.
Step7: So we're matching only about 80% of our reference sequences with this pattern. The implication for this application is that we'd only know how to slice 80% of our sequences, and as a result, we'd only have 80% of our sequences to train on. In addition to this being a problem because we want to train on as many sequences as possible, it's very likely that certain taxonomic groups are left out altogether. So, using regular expressions this way won't work.
Step8: If we now look at the distribution of the start and stop positions of each regular expression match, we see that each distribution is narrowly focused around certain positions. We can use those to define the region that we want to slice from our reference alignment, and then remove the gaps from all sequences to train our classifiers.
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
from __future__ import division
import numpy as np
import pandas as pd
import skbio
import qiime_default_reference
###
## UPDATE THIS CELL TO USE THE DEFAULT REFERENCE AGAIN!!
###
unaligned_ref_fp = qiime_default_reference.get_reference_sequences()
aligned_ref_fp = "/Users/caporaso/data/gg_13_8_otus/rep_set_aligned/97_otus.fasta" #qiime_default_reference.get_template_alignment()
tax_ref_fp = "/Users/caporaso/data/gg_13_8_otus/taxonomy/97_otu_taxonomy.txt" #qiime_default_reference.get_reference_taxonomy()
fwd_primer = skbio.DNA("GTGCCAGCMGCCGCGGTAA", {'id':'fwd-primer'})
rev_primer = skbio.DNA("GGACTACHVGGGTWTCTAAT", {'id':'rev-primer'}).reverse_complement()
def seq_to_regex(seq):
    """Convert a (possibly degenerate) sequence to a regular expression.

    Degenerate bases are expanded into character classes using the
    sequence class's ``degenerate_map``; all other bases pass through
    unchanged.
    """
    seq_cls = type(seq)
    pieces = []
    for base in str(seq):
        if base in seq_cls.degenerate_chars:
            expansion = ''.join(seq_cls.degenerate_map[base])
            pieces.append('[{0}]'.format(expansion))
        else:
            pieces.append(base)
    return ''.join(pieces)
regex = '({0}.*{1})'.format(seq_to_regex(fwd_primer),
seq_to_regex(rev_primer))
regex
seq_count = 0
match_count = 0
for seq in skbio.io.read(unaligned_ref_fp, format='fasta',
constructor=skbio.DNA):
seq_count += 1
for match in seq.find_with_regex(regex):
match_count += 1
match_percentage = (match_count / seq_count) * 100
print('{0} of {1} ({2:.2f}%) sequences have exact matches to the regular expression.'.format(match_count, seq_count, match_percentage))
starts = []
stops = []
for seq in skbio.io.read(aligned_ref_fp, format='fasta',
constructor=skbio.DNA):
for match in seq.find_with_regex(regex, ignore=seq.gaps()):
starts.append(match.start)
stops.append(match.stop)
pd.Series(starts).describe()
pd.Series(stops).describe()
locus = slice(int(np.median(starts)), int(np.median(stops)))
locus
subset_fraction = 1.0
kmer_counts = []
seq_ids = []
for seq in skbio.io.read(aligned_ref_fp, format='fasta',
constructor=skbio.DNA):
if np.random.random() > subset_fraction: continue
seq_ids.append(seq.metadata['id'])
sliced_seq = seq[locus].degap()
kmer_counts.append(sliced_seq.kmer_frequencies(8))
from sklearn.feature_extraction import DictVectorizer
X = DictVectorizer().fit_transform(kmer_counts)
taxonomy_level = 7 #
id_to_taxon = {}
with open(tax_ref_fp) as f:
for line in f:
id_, taxon = line.strip().split('\t')
id_to_taxon[id_] = '; '.join(taxon.split('; ')[:taxonomy_level])
y = [id_to_taxon[seq_id] for seq_id in seq_ids]
from sklearn.feature_selection import SelectPercentile
X = SelectPercentile().fit_transform(X, y)
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
%matplotlib inline
import matplotlib.pyplot as plt
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render the confusion matrix *cm* as a colour-mapped image.

    Args:
        cm: 2-D array of (optionally normalized) confusion counts;
            rows = known taxonomy, columns = predicted taxonomy.
        title: figure title.
        cmap: matplotlib colormap used for the image.
    """
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    plt.ylabel('Known taxonomy')
    plt.xlabel('Predicted taxonomy')
    plt.tight_layout()
    plt.show()
from sklearn.svm import SVC
y_pred = SVC(C=10, kernel='rbf', degree=3,
gamma=0.001).fit(X_train, y_train).predict(X_test)
from sklearn.metrics import confusion_matrix, f1_score
cm = confusion_matrix(y_test, y_pred)
cm_normalized = cm / cm.sum(axis=1)[:, np.newaxis]
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
print("F-score: %1.3f" % f1_score(y_test, y_pred, average='micro'))
from sklearn.naive_bayes import MultinomialNB
y_pred = MultinomialNB().fit(X_train, y_train).predict(X_test)
cm = confusion_matrix(y_test, y_pred)
cm_normalized = cm / cm.sum(axis=1)[:, np.newaxis]
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
print("F-score: %1.3f" % f1_score(y_test, y_pred, average='micro'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: NOTE
Step2: How to manage .xml and image files? -- theupdate files are prepared in the folder "data"
Step3: obtain the file names
Step4: Using blob function to upload the image files and return the URL
Step5: upload the XML files
Step6: demo the function "query" to search the data by filetitles in database
Step7: update XML files with image URL
Step8: output the content as new XMLs
Step9: delete the old XMLs in MDCS
Step10: upload the revised XMLs
Step11: Vidualize data
Step12: Plot the data
|
<ASSISTANT_TASK:>
Python Code:
import pip
requires = ['numpy','xmltodict']
installed_packages = pip.get_installed_distributions()
installed_packages_list = sorted(["%s==%s" % (i.key, i.version) for i in installed_packages])
matching = [[libs for libs in installed_packages_list if x in libs] for x in requires]
if len(matching) == len(requires):
print "requirement fullfilled"
else:
print "something is missing"
#!conda install numpy
#!pip install xmltodict
import os
file_check = ['DataModelDict.py','GE-DiffusionCouple-IN100-IN718.xml','GE-DiffusionCouple-IN718-R95.xml'
,'GE-DiffusionCouple-R88-R95.xml','mdcs','data','fig']
files = os.listdir(".")
matching = filter(None, [[fs for fs in files if x in fs] for x in file_check])
if len(matching) == len(file_check):
print "files in the folder"
else:
print "something is missing"
# standard python libraries
import numpy as np
import glob
import requests
import xmltodict
# -------------------------------------------- Matplotlib for plotting
# NEED to be installed (anaconda)
# http://matplotlib.org/
import matplotlib.pyplot as plt
import matplotlib.colorbar as clb
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.gridspec as gridspec
from matplotlib import ticker
from matplotlib.ticker import LogLocator
%matplotlib inline
# customized library by Zachary Trautt
import mdcs
# customized library by Lucas Hale
from DataModelDict import DataModelDict
from IPython.display import Image
Image("fig/file_problem.png", width=600)
# ---- setup user information to sign in MDCS
USER = "admin"
PSWD = "admin"
MDCS_URL = "http://127.0.0.1:8000"
xml_files = glob.glob("data/*.xml")
print "Files: ",len(xml_files), xml_files
filetitle = []
xmlfile = []
fig_files = []
for i in range(len(xml_files)):
filetitle.append(xml_files[i][5:-4])
fig_files.append(str(xml_files[i][0:len(xml_files[i])-3] + 'tif'))
print "XML files: ",filetitle
print "Image files: ",fig_files
figURL = []
for i in range(len(xml_files)):
figURL.append(mdcs.blob.upload(fig_files[i],MDCS_URL,USER,PSWD,'cert'))
print figURL
for i in range(len(xml_files)):
print mdcs.curate_as(xml_files[i],filetitle[i],MDCS_URL, \
USER,PSWD,'cert',template_title="mod_demo_diffusion0224")
qres = []
file_id = []
for i in range(len(xml_files)):
content = mdcs.explore.query(MDCS_URL,USER,PSWD,'cert', \
None,str({'title':filetitle[i]}))
qres.append(xmltodict.parse(content[0]['content']))
file_id.append(content[0]['_id'])
res = []
for i in range(len(xml_files)):
qres[i]['experiment']['experimentType']['chemicalDiffusivity'] \
['dataAnalysis']['micrographOfStructure']['fileExtension'] = figURL[i]
res.append(xmltodict.unparse(qres[i]))
filename = []
# write information to interdiffusion.xml
for i in range(len(xml_files)):
filename.append(str(filetitle[i] + str('.xml')))
diffusion = open(filename[i], 'w')
diffusion.write(res[0].encode('utf8'))
diffusion.close()
for i in range(len(xml_files)):
mdcs.explore.delete(file_id[i],MDCS_URL,USER,PSWD,cert=None)
for i in range(len(xml_files)):
print filename[i],mdcs.curate_as(filename[i],filetitle[i]
,MDCS_URL,USER,PSWD,'cert',template_title="mod_demo_diffusion0224")
content = mdcs.explore.query(MDCS_URL,USER,PSWD,'cert',
None,str({'title':filetitle[0]}))
qdata = DataModelDict(content[0]['content'])
table = qdata.find('table')
distable = []
for row in table['rows'].iterlist('row'):
disrow = []
for column in row.iterlist('column'):
disrow.append(column['#text'])
distable.append(disrow)
del distable[0]
distable = np.array(distable)
fig, ax = plt.subplots(figsize=(9, 6))
for i in range(2,len(distable[0])):
ax.plot(distable[:,0],distable[:,i],'o-')
fig.tight_layout()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Attendees evolution
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
data = [('year', 'location', 'attendees'),
(2002, 'Charleroi', 240),
(2003, 'Charleroi', 300),
(2004, 'Göteborg', 'nan'),
(2005, 'Göteborg', 'nan'),
(2006, 'Geneva', 'nan'),
(2007, 'Vilnius', 'nan'),
(2008, 'Vilnius', 206),
(2009, 'Birmingham', 410),
(2010, 'Birmingham', 446),
(2011, 'Florence', 670),
(2012, 'Florence', 760),
(2013, 'Florence', 870),
(2014, 'Berlin', 1250),
(2015, 'Bilbao', 1100),]
names = data[0]
eps = {name: [] for name in names}
for line in data[1:]:
for pos, name in enumerate(names):
eps[name].append(line[pos])
plt.plot(eps['year'], eps['attendees'])
data = [('year', 'location', 'attendees'),
(2014, 'Bilbao', 0),
(2015, 'Bilbao', 1100)]
names = data[0]
eps = {name: [] for name in names}
for line in data[1:]:
for pos, name in enumerate(names):
eps[name].append(line[pos])
plt.plot(eps['year'], eps['attendees'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2
Step2: 3
Step3: 4
Step4: 5
|
<ASSISTANT_TASK:>
Python Code:
csv_list = open("../data/GP02/US_births_1994-2003_CDC_NCHS.csv").read().split("\n")
csv_list[0:10]
def read_csv(filename):
    """Read a births CSV file into a list of integer rows.

    The header line is skipped and blank lines (e.g. the empty string left
    by a trailing newline) are ignored; every remaining field is converted
    to int.
    """
    # 'with' closes the file deterministically (original leaked the handle).
    with open(filename) as f:
        string_list = f.read().split("\n")[1:]
    final_list = []
    for row in string_list:
        if not row.strip():
            continue  # a blank/trailing line would crash int("")
        final_list.append([int(value) for value in row.split(",")])
    return final_list
cdc_list = read_csv("../data/GP02/US_births_1994-2003_CDC_NCHS.csv")
cdc_list[0:10]
def month_births(data):
    """Sum births (column 4) per month (column 1) over all rows."""
    totals = {}
    for record in data:
        month_key = record[1]
        totals[month_key] = totals.get(month_key, 0) + record[4]
    return totals
# Total births per month across the whole dataset.
cdc_month_births = month_births(cdc_list)
cdc_month_births
def dow_births(data):
    """Sum births (column 4) per day of week (column 3) over all rows."""
    totals = {}
    for record in data:
        day_key = record[3]
        totals[day_key] = totals.get(day_key, 0) + record[4]
    return totals
# Total births per day of week across the whole dataset.
cdc_dow_births = dow_births(cdc_list)
cdc_dow_births
def calc_counts(data, column):
    """Sum births (column 4) grouped by the values found in `column`."""
    totals = {}
    for record in data:
        key = record[column]
        totals[key] = totals.get(key, 0) + record[4]
    return totals
# Re-derive all aggregates with the generic helper:
# column 0 = year, 1 = month, 2 = day of month, 3 = day of week.
cdc_year_births = calc_counts(cdc_list, 0)
cdc_month_births = calc_counts(cdc_list, 1)
cdc_dom_births = calc_counts(cdc_list, 2)
cdc_dow_births = calc_counts(cdc_list, 3)
cdc_year_births
cdc_month_births
cdc_dom_births
cdc_dow_births
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Notes
Step2: Define parameters
Step3: Load WWLN data and analyze it
Step4: Save data
Step5: Load data (Blitzortung)
Step6: Plot lightning results
Step7: Map data ##
|
<ASSISTANT_TASK:>
Python Code:
# Load required packages
import numpy as np
import datetime as dt
from datetime import timedelta
import pandas as pd
from tqdm import tqdm
import os
import pkg_resources as pkg
import geopandas as gpd
from shapely.geometry import Point
from bokeh.plotting import Figure, show, output_notebook, vplot
from bokeh.charts import Bar
from bokeh.io import gridplot
from bokeh.models import HoverTool, Span, Range1d, LinearAxis
from bokeh.models.sources import ColumnDataSource
import json
import gzip
from bs4 import BeautifulSoup as bs
import requests
import stormstats
import codecs
import folium
output_notebook()  # render Bokeh plots inline in the notebook
"http://en.blitzortung.org/archive_data.php?session=1&stations_users=0&selected_numbers=*&end_date=1478131200&end_time=43200&start_date=1478131200&start_time=0&rawdata_image=0&north=90&west=-180&east=180&south=-90&map=0&width_orig=640&width_result=640&agespan=60&frames=12&delay=100&last_delay=1000&show_result=1"
url = "http://en.blitzortung.org/archive_data.php?session=1&stations_users=0"\
"&selected_numbers=*&end_date=1478131200&end_time=76200&start_date=1478131200&"\
"start_time=69000&rawdata_image=0&north=90&west=-180&east=180&south=-90&map=0&"\
"width_orig=640&width_result=640&agespan=60&frames=12&delay=100&last_delay=1000&show_result=1"
#csv_target ="/Tmp/archive_1_raw.txt.gz"
url
import wget
url = "http://en.blitzortung.org/Tmp/archive_2_raw.txt.gz"
fname = wget.download(url)
requests.get(url, stream=True)
#r = requests.get(url, stream=True, auth=HTTPBasicAuth('user', 'pass'))
r = requests.get(url)
soup = bs(r.text, "lxml")
r
line
for line in soup:
print(line)
tst = r.content
bs(r.text)
stormstats.downloader.get_data(start="2015-02-01T06:30", end="2015-02-01T10:05")
stormstats.storm.gen_listfiles(data_path='./tmp_data/', ext='.gz')
import pandas as pd
import json
tmp = json.JSONDecoder('tmp_data/bz-2015-02-01-06-40.json')
json.JSONDecoder.decode('tmp_data/bz-2015-02-01-06-40.json','s')
pd.read_json('tmp_data/bz-2015-02-01-06-40.json')
data_path='data/WWLN/Jan2016/'
#data_path = "WWLN"
# time step (in minutes) used to count number of lightning strikes. Max time
# step is 1440 (1 day) and it should be a divisor of 1440 minutes.
time_step=60
# if select_area=False: all lightning are counted
# if select_area=True: only lightning in selected area are counted (lat and lon limits are needed)
select_area=True
# define area to count lightning strikes (lat, lon) if select_area=True
north_lat_limit=70
south_lat_limit=30
west_lon_limit=-15
east_lon_limit=45
# exclude all lightning data with error larger than max_error
max_error=30
# exclude all lightning strikes detected with fewer than min_stations stations
min_stations=5
# note: in raw data lightning strikes detected with less than 5 stations and error larger than 30
# are already excluded
# generate list of files in data directory; if start_date and end_date are
# defined, only data in this period will be used
listfiles=gen_listfiles(data_path=data_path, ext='.loc', start_date='01-01-2016', end_date='01-02-2016')
# make loop for all files, accumulating per-time-step counts in LN_count
i=0
for file in tqdm(listfiles):
    # read lightning data (pandas DataFrame -- see read_WWLN)
    LN_data = read_WWLN(file=data_path+file)
    # --- make quality check and select lightning in given area
    # exclude lightning data that have larger error than max_error
    LN_data=LN_data.loc[LN_data['err']<=max_error]
    # exclude lightning data that have less than min_stations
    LN_data=LN_data.loc[LN_data['#sta']>=min_stations]
    # select only lightning strikes in given area
    if(select_area):
        LN_data=LN_data.loc[(LN_data['lat']<=north_lat_limit) & (LN_data['lat']>=south_lat_limit) &
                            (LN_data['lon']>=west_lon_limit) & (LN_data['lon']<=east_lon_limit)]
    # --- Count lightning strikes according to time_step defined
    LN_tmp=count_lightning(LN_data, time_step)
    # append to the running DataFrame (first iteration initialises it)
    if(i>=1):
        LN_count=LN_count.append(LN_tmp)
    else:
        LN_count=LN_tmp
    i=i+1
#LN_count
LN_count.to_csv('WWLN_data_europe.csv')
# get data from Blitzortung server
get_data(start="2015-02-01T06:30", end="2015-02-01T10:05",
         dl_link="http://data.blitzortung.org/Data_1/Protected/Strokes/")
#
# define Bokeh tools
TOOLS = "pan, wheel_zoom, box_zoom, hover, reset, save"
# calculate error bars (currently unused)
#err_y1=LN_count['#sta_mean']-(LN_count['#sta_std']/(np.sqrt(LN_count['count'])))
#err_y2=LN_count['#sta_mean']+(LN_count['#sta_std']/(np.sqrt(LN_count['count'])))
# Three stacked time-series panels sharing the x range: strike counts,
# mean location error, and mean number of detecting stations.
fig_LN_count_a = Figure(plot_width=800, plot_height=400, title="Lightning count", tools=TOOLS,
                        x_axis_label="Date", y_axis_label="Nr. of lightning strikes", x_axis_type = "datetime",
                        title_text_font_size='22pt')
fig_LN_count_a.line(LN_count['count'].index, LN_count['count'].values, color='red')
fig_LN_count_b = Figure(plot_width=800, plot_height=300, tools=TOOLS,
                        y_axis_label="Error", x_axis_type = "datetime", x_range=fig_LN_count_a.x_range)
fig_LN_count_b.line(LN_count['err_mean'].index, LN_count['err_mean'].values, color='blue')
fig_LN_count_c = Figure(plot_width=800, plot_height=300, tools=TOOLS,
                        x_axis_label="Date", y_axis_label="Mean nr. of stations", x_axis_type = "datetime",
                        x_range=fig_LN_count_a.x_range)
fig_LN_count_c.line(LN_count['#sta_mean'].index, LN_count['#sta_mean'].values, color='black')
fig_LN_count = gridplot([[fig_LN_count_a],[fig_LN_count_b],[fig_LN_count_c]])
show(fig_LN_count)
# mapping function doesn't seem to handle the large sizes well, so I am limiting the size for now
mx = get_map(strike_data = LN_data)
mx
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Allocate a PyTorch Tensor on the GPU
|
<ASSISTANT_TASK:>
Python Code:
% reset -f
from __future__ import print_function
from __future__ import division
import math
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import torch
import sys
# Report the Python / PyTorch / CUDA / cuDNN environment and list GPUs.
print('__Python VERSION:', sys.version)
print('__pyTorch VERSION:', torch.__version__)
print('__CUDA VERSION')
from subprocess import call
# call(["nvcc", "--version"]) does not work
! nvcc --version
print('__CUDNN VERSION:', torch.backends.cudnn.version())
print('__Number CUDA Devices:', torch.cuda.device_count())
print('__Devices')
call(["nvidia-smi", "--format=csv", "--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free"])
print('Active CUDA Device: GPU', torch.cuda.current_device())
print ('Available devices ', torch.cuda.device_count())
print ('Current cuda device ', torch.cuda.current_device())
# Allocate a (uninitialised) 3x4 tensor and move it to the GPU if available.
x=torch.Tensor(3,4)
if torch.cuda.is_available():
    x = x.cuda()*2
print (type(x))
print (x)
import numpy as np
import torch.cuda as cu
import contextlib
import time
# allocates a tensor on GPU 1
a = torch.cuda.FloatTensor(1)
# transfers a tensor from CPU to GPU 1
b = torch.FloatTensor(1).cuda()
# Timing helper with CUDA synchronization: synchronizes before and after the
# timed region so queued-but-unfinished GPU work is included in the measurement.
@contextlib.contextmanager
def timing(name):
    cu.synchronize()
    start_time = time.time()
    yield
    cu.synchronize()
    end_time = time.time()
    print ('{} {:6.3f} seconds'.format(name, end_time-start_time))
# Compare CPU->GPU transfer strategies for several tensor shapes of the
# same total size (128**3 elements).
for shape in [(128**3,), (128,128**2), (128,128,128), (32,32,32,64)]:
    print ('shape {}, {:.1f} MB'.format(shape, np.zeros(shape).nbytes/1024.**2))
    with timing('from_numpy sent to GPU '): torch.from_numpy (np.zeros(shape)).cuda()
    with timing('CPU constructor         '): torch.FloatTensor(np.zeros(shape))
    with timing('CPU constructor sent to GPU'): torch.FloatTensor(np.zeros(shape)).cuda()
    with timing('GPU constructor         '): cu. FloatTensor(np.zeros(shape))
    print
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: We have to make sure all conditions have the same counts, as the ANOVA
Step3: Create TFR representations for all conditions
Step4: Setup repeated measures ANOVA
Step5: Now we'll assemble the data matrix and swap axes so the trial replications
Step6: While the iteration scheme used above for assembling the data matrix
Step7: Account for multiple comparisons using FDR versus permutation clustering test
Step8: A stat_fun must deal with a variable number of input arguments.
Step9: Create new stats image with only significant clusters
Step10: Now using FDR
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Denis Engemann <denis.engemann@gmail.com>
#          Eric Larson <larson.eric.d@gmail.com>
#          Alexandre Gramfort <alexandre.gramfort@inria.fr>
#          Alex Rockhill <aprockhill@mailbox.org>
#
# License: BSD-3-Clause
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import f_threshold_mway_rm, f_mway_rm, fdr_correction
from mne.datasets import sample
print(__doc__)
# --- Load the MNE sample dataset and epoch around the stimuli ---
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_raw.fif'
event_fname = meg_path / 'sample_audvis_raw-eve.fif'
tmin, tmax = -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443']  # bads
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
                       stim=False, include=include, exclude='bads')
ch_name = 'MEG 1332'
# Load conditions (2x2 design: auditory/visual x left/right)
reject = dict(grad=4000e-13, eog=150e-6)
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                    picks=picks, baseline=(None, 0), preload=True,
                    reject=reject)
epochs.pick_channels([ch_name])  # restrict example to one channel
# Repeated-measures ANOVA requires the same trial count per condition.
epochs.equalize_event_counts(event_id)
# Factor to down-sample the temporal dimension of the TFR computed by
# tfr_morlet.
decim = 2
freqs = np.arange(7, 30, 3)  # define frequencies of interest
n_cycles = freqs / freqs[0]
zero_mean = False  # don't correct morlet wavelet to be of mean zero
# To have a true wavelet zero_mean should be True but here for illustration
# purposes it helps to spot the evoked response.
# --- Compute single-trial TFR power per condition, baseline-corrected ---
epochs_power = list()
for condition in [epochs[k] for k in event_id]:
    this_tfr = tfr_morlet(condition, freqs, n_cycles=n_cycles,
                          decim=decim, average=False, zero_mean=zero_mean,
                          return_itc=False)
    this_tfr.apply_baseline(mode='ratio', baseline=(None, 0))
    this_power = this_tfr.data[:, 0, :, :]  # we only have one channel.
    epochs_power.append(this_power)
# --- Set up the 2x2 repeated-measures ANOVA ---
n_conditions = len(epochs.event_id)
n_replications = epochs.events.shape[0] // n_conditions
factor_levels = [2, 2]  # number of levels in each factor
effects = 'A*B'  # this is the default signature for computing all effects
# Other possible options are 'A' or 'B' for the corresponding main effects
# or 'A:B' for the interaction effect only (this notation is borrowed from the
# R formula language)
n_freqs = len(freqs)
times = 1e3 * epochs.times[::decim]
n_times = len(times)
data = np.swapaxes(np.asarray(epochs_power), 1, 0)
# so we have replications × conditions × observations
# where the time-frequency observations are freqs × times:
print(data.shape)
fvals, pvals = f_mway_rm(data, factor_levels, effects=effects)
effect_labels = ['modality', 'location', 'modality by location']
fig, axes = plt.subplots(3, 1, figsize=(6, 6))
# let's visualize our effects by computing f-images
for effect, sig, effect_label, ax in zip(fvals, pvals, effect_labels, axes):
    # show naive F-values in gray
    ax.imshow(effect, cmap='gray', aspect='auto', origin='lower',
              extent=[times[0], times[-1], freqs[0], freqs[-1]])
    # create mask for significant time-frequency locations
    effect[sig >= 0.05] = np.nan
    c = ax.imshow(effect, cmap='autumn', aspect='auto', origin='lower',
                  extent=[times[0], times[-1], freqs[0], freqs[-1]])
    fig.colorbar(c, ax=ax)
    ax.set_xlabel('Time (ms)')
    ax.set_ylabel('Frequency (Hz)')
    ax.set_title(f'Time-locked response for "{effect_label}" ({ch_name})')
fig.tight_layout()
# --- Cluster-level permutation test restricted to the interaction ---
effects = 'A:B'
def stat_fun(*args):
    # Adapter for permutation_cluster_test: it passes one array per
    # condition, so swap back to replications x conditions and keep F only.
    return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
                     effects=effects, return_pvals=False)[0]
# The ANOVA returns a tuple f-values and p-values, we will pick the former.
pthresh = 0.001  # set threshold rather high to save some time
f_thresh = f_threshold_mway_rm(n_replications, factor_levels, effects,
                               pthresh)
tail = 1  # f-test, so tail > 0
n_permutations = 256  # Save some time (the test won't be too sensitive ...)
F_obs, clusters, cluster_p_values, h0 = mne.stats.permutation_cluster_test(
    epochs_power, stat_fun=stat_fun, threshold=f_thresh, tail=tail,
    n_jobs=None, n_permutations=n_permutations, buffer_size=None,
    out_type='mask')
# Keep only clusters surviving p < .05 and overlay them on the F-image.
good_clusters = np.where(cluster_p_values < .05)[0]
F_obs_plot = F_obs.copy()
F_obs_plot[~clusters[np.squeeze(good_clusters)]] = np.nan
fig, ax = plt.subplots(figsize=(6, 4))
for f_image, cmap in zip([F_obs, F_obs_plot], ['gray', 'autumn']):
    c = ax.imshow(f_image, cmap=cmap, aspect='auto', origin='lower',
                  extent=[times[0], times[-1], freqs[0], freqs[-1]])
fig.colorbar(c, ax=ax)
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
ax.set_title(f'Time-locked response for "modality by location" ({ch_name})\n'
             'cluster-level corrected (p <= 0.05)')
fig.tight_layout()
# --- Same comparison with FDR correction instead of clustering ---
mask, _ = fdr_correction(pvals[2])
F_obs_plot2 = F_obs.copy()
F_obs_plot2[~mask.reshape(F_obs_plot.shape)] = np.nan
fig, ax = plt.subplots(figsize=(6, 4))
for f_image, cmap in zip([F_obs, F_obs_plot2], ['gray', 'autumn']):
    c = ax.imshow(f_image, cmap=cmap, aspect='auto', origin='lower',
                  extent=[times[0], times[-1], freqs[0], freqs[-1]])
fig.colorbar(c, ax=ax)
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
ax.set_title(f'Time-locked response for "modality by location" ({ch_name})\n'
             'FDR corrected (p <= 0.05)')
fig.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Enter your project and GCS bucket
Step2: Initialize Vertex SDK for Python
Step6: Write your Training Script
Step7: Launch a Training Job to Create a Model
Step8: Deploy Your Model
Step9: Predict on the Endpoint
|
<ASSISTANT_TASK:>
Python Code:
# Install the Vertex AI SDK, then restart the kernel so the new version loads.
!pip3 uninstall -y google-cloud-aiplatform
!pip3 install google-cloud-aiplatform
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
# Authenticate when running inside Colab (no-op elsewhere).
import sys
if "google.colab" in sys.modules:
    from google.colab import auth
    auth.authenticate_user()
# Project / staging-bucket placeholders -- fill in before running.
MY_PROJECT = "YOUR PROJECT"
MY_STAGING_BUCKET = "gs://YOUR BUCKET"  # bucket should be in same region as ucaip
from google.cloud import aiplatform
aiplatform.init(project=MY_PROJECT, staging_bucket=MY_STAGING_BUCKET)
%%writefile training_script.py
# Source: https://cloud.google.com/vertex-ai/docs/tutorials/image-recognition-custom
import argparse
import logging
import os

import tensorflow as tf
import tensorflow_datasets as tfds

IMG_WIDTH = 128


def normalize_img(image):
    """Normalizes image.

    * Resizes image to IMG_WIDTH x IMG_WIDTH pixels
    * Casts values from `uint8` to `float32`
    * Scales values from [0, 255] to [0, 1]

    Returns:
      A tensor with shape (IMG_WIDTH, IMG_WIDTH, 3). (3 color channels)
    """
    image = tf.image.resize_with_pad(image, IMG_WIDTH, IMG_WIDTH)
    return image / 255.


def normalize_img_and_label(image, label):
    """Normalizes image and label.

    * Performs normalize_img on image
    * Passes through label unchanged

    Returns:
      Tuple (image, label) where
      * image is a tensor with shape (IMG_WIDTH, IMG_WIDTH, 3). (3 color
        channels)
      * label is an unchanged integer [0, 4] representing flower type
    """
    return normalize_img(image), label


def get_args():
    """Argument parser.

    Returns:
      Dictionary of arguments.
    """
    parser = argparse.ArgumentParser(description='Flower classification sample')
    parser.add_argument(
        '--tfds',
        default=None,
        help='The tfds URI from https://www.tensorflow.org/datasets/ to load the data from')
    args = parser.parse_args()
    return args


# Training settings
args = get_args()
# Vertex AI injects AIP_MODEL_DIR as the GCS export location for the model.
if 'AIP_MODEL_DIR' not in os.environ:
    raise KeyError(
        'The `AIP_MODEL_DIR` environment variable has not been' +
        'set. See https://cloud.google.com/vertex-ai/docs/tutorials/image-recognition-custom/training'
    )
output_directory = os.environ['AIP_MODEL_DIR']
logging.info('Loading and preprocessing data ...')
dataset = tfds.load(args.tfds,
                    split='train',
                    try_gcs=True,
                    shuffle_files=True,
                    as_supervised=True)
dataset = dataset.map(normalize_img_and_label,
                      num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.cache()
dataset = dataset.shuffle(1000)
dataset = dataset.batch(128)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
logging.info('Creating and training model ...')
# Small CNN: three conv/pool stages then a dense head with 5 logits.
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(16,
                           3,
                           padding='same',
                           activation='relu',
                           input_shape=(IMG_WIDTH, IMG_WIDTH, 3)),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu'),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu'),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation="relu"),
    tf.keras.layers.Dense(5)  # 5 classes
])
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])
model.fit(dataset, epochs=10)
logging.info(f'Exporting SavedModel to: {output_directory}')
# Add softmax layer for intepretability
probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()])
probability_model.save(output_directory)
# Package the local training script as a Vertex AI custom training job.
job = aiplatform.CustomTrainingJob(
    display_name="train-flowers-dist-1-replica",
    script_path="training_script.py",
    container_uri="gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest",
    requirements=["gcsfs==0.7.1"],
    model_serving_container_image_uri="gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest",
)
# Train on the tf_flowers dataset and register the resulting model.
model = job.run(
    args=["--tfds", "tf_flowers:3.*.*"],
    replica_count=1,
    model_display_name="flowers-model",
)
# Deploy to an online-prediction endpoint.
endpoint = model.deploy(machine_type="n1-standard-4")
# Download sample flower images to test the endpoint with.
!gsutil -m cp -R gs://cloud-ml-data/img/flower_photos/daisy/14221848160_7f0a37c395.jpg .
!gsutil -m cp -R gs://cloud-ml-data/img/flower_photos/tulips/13289268363_b9337d751e.jpg .
!gsutil -m cp -R gs://cloud-ml-data/img/flower_photos/sunflowers/14623719696_1bb7970208_n.jpg .
import numpy as np
from PIL import Image
# Resize to the 128x128 input the model was trained on before predicting.
daisy_floats = np.array(Image.open("14221848160_7f0a37c395.jpg"))
small_image = np.array(Image.fromarray(np.uint8(daisy_floats)).resize((128, 128)))
endpoint.predict(instances=[small_image.tolist()])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def PrimeDigitNumber(N, size):
    """Return (as a string) the largest number not exceeding the `size`-digit
    number `N` (given as a string) whose digits are all prime digits
    (2, 3, 5 or 7).

    If no such `size`-digit number exists (e.g. N = "1000"), the answer is
    the largest (size-1)-digit prime-digit number, i.e. size-1 sevens.

    The original implementation mixed characters and ints, assigned into
    string indices and returned `list + int`; it could never run.
    """
    PRIME_DIGITS = "2357"

    def largest_prime_below(d):
        # Largest prime digit strictly less than digit character d ('' if none).
        for p in "7532":
            if p < d:
                return p
        return ''

    result = []
    for i in range(size):
        d = N[i]
        if d in PRIME_DIGITS:
            # Keep matching prefix digits to stay as close to N as possible.
            result.append(d)
            continue
        lower = largest_prime_below(d)
        if lower:
            # Drop below N at position i, then maximise the tail with 7s.
            result.append(lower)
            return ''.join(result) + '7' * (size - i - 1)
        # d is smaller than every prime digit: backtrack and decrease an
        # earlier kept digit instead.
        while result:
            lower = largest_prime_below(result.pop())
            if lower:
                result.append(lower)
                return ''.join(result) + '7' * (size - len(result))
        # Even the leading digit cannot be decreased: the answer has one
        # fewer digit, all sevens.
        return '7' * (size - 1)
    return ''.join(result)


if __name__ == "__main__":  # fixed: was "__main __", which never matched
    N = "1000"
    size = len(N)
    print(PrimeDigitNumber(N, size))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: Données
Step7: Programmation linéaire
Step8: Variables et Objectif
Step9: Contraintes
Step10: Résolution
Step11: Parrainage
|
<ASSISTANT_TASK:>
Python Code:
# Nom de fichiers
fichierParrain = "parrains.csv"
fichierFilleul = "filleuls.csv"
fichierResultat = "parrainage.csv"
# Imports
import csv
import glob
import pulp # LP
# pulp.pulpTestAll() # Test
def fetch_row_parrain(row, line, file):
    """Parse a mentor ('parrain') CSV row into (key, [option, primant, tutorat]).

    Returns [] (after reporting the file and line) when the row is too
    short or not indexable.  (Docstring quotes were lost in the original,
    which made the function a syntax error; restored here.)
    """
    try:
        return row[0], [row[1], row[2], row[3]]
    except (IndexError, TypeError):  # narrowed from a bare `except:`
        print("Erreur de lecture fichier {}, ligne: {}".format(file, line))
        return []
def fetch_row_filleul(row, line, file):
    """Parse a mentee ('filleul') CSV row into (key, [option, primant, tutorat]).

    Returns [] (after reporting the file and line) when the row is too
    short or not indexable.  (Docstring quotes were lost in the original,
    which made the function a syntax error; restored here.)
    """
    try:
        return row[0], [row[1], row[2], row[3]]
    except (IndexError, TypeError):  # narrowed from a bare `except:`
        print("Erreur de lecture fichier {}, ligne: {}".format(file, line))
        return []
def fetch_row(row, line, file, category):
    """Dispatch row parsing by record category.

    Input:
        row: the raw CSV row
        line: line number in the file (for error reporting)
        file: file name (for error reporting)
        category: `parrain` or `filleul`
    Output:
        the parsed (key, fields) tuple, or [] on malformed input.
    Raises for any other category.  (Docstring quotes were lost in the
    original, which made the function a syntax error; restored here.)
    """
    if category == "parrain":
        return fetch_row_parrain(row, line, file)
    elif category == "filleul":
        return fetch_row_filleul(row, line, file)
    else:
        raise Exception("Un fichier doit être de type `parrain` ou `filleul`")
def fetch_from_file(file, category):
    """Load a semicolon-separated CSV file into a {key: fields} dict.

    Input:
        file: CSV file name
        category: `parrain` or `filleul`
    Malformed rows are skipped (fetch_row already reports them); on a
    duplicate key the first occurrence wins and the duplicate is reported.
    (Docstring quotes were lost in the original, which made the function a
    syntax error; restored here.)
    """
    data = dict()
    with open(file, 'r') as csv_file:
        spamreader = csv.reader(csv_file, delimiter=';', skipinitialspace=True)
        for row in spamreader:
            line = fetch_row(row, spamreader.line_num, file, category)
            if line == []:  # malformed row: already reported, skip it
                continue
            if line[0] in data:
                print("Doublon {}, dans le fichier {}".format(line[0], file))
            else:
                data[line[0]] = line[1]
    return data
# Load both populations (mentors and mentees) keyed by their identifiers.
dataParrains = fetch_from_file(fichierParrain, "parrain")
dataFilleuls = fetch_from_file(fichierFilleul, "filleul")
# On s'assure que les tables construites ne sont pas vides...
assert(len(dataParrains) > 0)
assert(len(dataFilleuls) > 0)
def matching(s, t):
    """Return a compatibility score between mentor profile `s` and mentee
    profile `t`; the HIGHER the score, the LESS compatible they are.

    Profiles are [option, primant, tutorat] triples.  (Docstring quotes
    were lost in the original, which made the function a syntax error;
    restored here.)
    """
    res = 1
    if (s[0] != t[0]):  # different Option: heavy penalty
        res += 7
    if (s[1] != t[1]):  # different Primant status: small penalty
        res += 2
    if (s[2] == t[2]):  # same Tutorat group: strongest penalty
        res += 10
    return res
# One binary variable per (mentor, mentee) pair: 1 if t is assigned to s.
arrow_set = [(s, t) for s in dataParrains for t in dataFilleuls]
assert(len(arrow_set) == len(dataParrains) * len(dataFilleuls))
# Variables
print("Variables...")
arrows = pulp.LpVariable.dicts('affectation', arrow_set, lowBound = 0, upBound = 1, cat = pulp.LpInteger)
# Modèle
print("Modèle...")
model = pulp.LpProblem("Parrainage", pulp.LpMinimize)
# Objectif : minimise the total incompatibility of selected pairs.
print("Objectif...")
model.setObjective(pulp.lpSum([matching(dataParrains[s], dataFilleuls[t]) * arrows[(s, t)]
                               for (s, t) in arrow_set]))
# Au plus 4 filleuls par parrain
for s in dataParrains:
    model.addConstraint(pulp.lpSum(arrows[(s, t)] for t in dataFilleuls) <= 4)
# Exactement 1 parrain
for t in dataFilleuls:
    model.addConstraint(pulp.lpSum(arrows[(s, t)] for s in dataParrains) == 1)
print(model.solve())  # 1 == pulp.LpStatusOptimal
# Keep only the pairs the solver switched on.
parrainage = [a for a in arrow_set if arrows[a].value() == 1]
# Écrit la solution dans `fichierResultat`
with open(fichierResultat, 'w') as csv_file:
    writer = csv.writer(csv_file)
    writer.writerows(parrainage)
# Afficher la solution ici
# print("Parrain -> Filleul\n")
# for x in parrainage:
#     print("{} -> {}".format(x[0], x[1]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Synthetic test data generation
Step2: Testing synthetic data functions
Step3: Implementing word search using string comparison (baseline technique)
Step4: Test speed of finding number of records with specific words
Step5: Implementing word indexing/search by prime factorisation
Step6: Searching
Step7: Test speed of finding number of records with specific words
Step8: Compare Performance
Step9: Performance against number of comments to search through
Step10: Performance against number of words searched for
Step11: Performance against length of comments to search through
Step12: Performance against length of dictionary
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import random
import time
import copy
import numpy as np
import numpy.core.defchararray as npstr
import matplotlib.pyplot as plt
def generateWord(length):
    """Return a random lowercase word of the given length."""
    letters = 'abcdefghijklmnopqrstuvwxyz'
    return ''.join(random.choice(letters) for _ in range(length))
def generateLengths(count, mean, dist, showGraph=False):
    """Draw `count` integer lengths from N(mean, dist), clipped to >= 1.

    Optionally shows a histogram of the drawn lengths.
    """
    lengths = np.round(np.random.normal(mean, dist, count), 0).astype(int)
    lengths[lengths < 1] = 1
    if showGraph:
        plt.hist(lengths, 30, normed=True)
        plt.title("Histogram of lengths")
        plt.show()
    return lengths
def generateDictionary(count):
    """Build an array of `count` unique random words whose lengths follow
    a normal distribution (mean 5, std 1)."""
    words = []
    for length in generateLengths(count, 5, 1):
        candidate = generateWord(length)
        while candidate in words:  # redraw until the word is unique
            candidate = generateWord(length)
        words.append(candidate)
    return np.asarray(words)
def pickRandomWordsFromDict(dictionary, minLength, maxLength, count):
    """Select the first `count` words from `dictionary` whose length lies
    in [minLength, maxLength].

    Raises:
        ValueError: when the dictionary does not contain enough matching
        words.  (The original *returned* the ValueError instance instead of
        raising it, so callers silently received an exception object.)
    """
    output = []
    for word in dictionary:
        if maxLength >= len(word) >= minLength:
            output.append(word)
            if len(output) == count:
                return output
    raise ValueError('Dictionary did not contain enough words of your min length')
def generateComments(count, dictionary, mean, dist, showGraph=False):
    """Generate `count` space-separated comments built from `dictionary`
    words; per-comment word counts follow N(mean, dist)."""
    lengths = generateLengths(count, mean, dist, showGraph)
    texts = [' '.join(random.choice(dictionary) for _ in range(n))
             for n in lengths]
    return np.asarray(texts)
# Smoke-test the synthetic-data helpers and build the working dataset.
generateWord(3)
generateLengths(100,5,1,True)
dictionary = generateDictionary(100)
dictionary [1:5]
pickRandomWordsFromDict(dictionary,3,8,3)
# One million synthetic comments of ~4 words each.
comments = generateComments(1000000,dictionary,4,1,True)
comments[1:5]
def findCommentsWithWords(comments, words):
    """Return the subset of `comments` that contains every word in `words`
    as a substring (vectorised string search).

    Improvements over the original: the up-front deepcopy was pure waste
    (each filter step already produces a new array), and the deprecated
    numpy.core.defchararray module is replaced by the public np.char API.
    """
    matches = np.asarray(comments)
    for word in words:
        matches = matches[np.char.find(matches, word) != -1]
    return matches
def numberOfMatches(comments, words):
    """Count how many comments contain every one of `words`."""
    return len(findCommentsWithWords(comments, words))
# Pick two search words and time the plain string-comparison search.
randoWords = pickRandomWordsFromDict(dictionary,3,8,2)
randoWords
start = time.time()
print(str(numberOfMatches(comments,randoWords)) + ' number of records found containing ' + str(randoWords))
end = time.time()
print(str(end - start)+' seconds')
def createIndexSchema(dictionary):
    """Assign a distinct prime number (read from primes.csv, column 1,
    skipping the header row) to each dictionary word.

    Returns a dict mapping word -> prime; both are stored as strings
    because np.c_ promotes the pair to a common string dtype.
    """
    primes = np.genfromtxt('primes.csv', delimiter=",").astype(int)
    assigned = primes[1:len(dictionary) + 1, 1]
    return dict(np.c_[dictionary, assigned])
def indexComments(comments, indexSchemal):
    """Encode each comment as the product of the prime indexes of its
    words; words missing from the schema are ignored (factor 1)."""
    def encode(comment):
        product = 1
        for word in comment.split(' '):
            if word in indexSchemal:
                product *= int(indexSchemal[word])
        return product
    return [encode(comment) for comment in comments]
def preprocessPrimeComments(dictionary, comments):
    """Build the prime index schema for `dictionary` and encode all
    `comments` with it; returns [schema, encoded_comments]."""
    schema = createIndexSchema(dictionary)
    return [schema, indexComments(comments, schema)]
def convertWordsToProduct(indexSchema, words):
    """Multiply together the prime indexes of the given words."""
    product = 1
    for word in words:
        product *= int(indexSchema[word])
    return product
def searchByPrimeFact(indexedComments, serachProduct):
    """Boolean mask: True where the encoded comment is divisible by the
    search product, i.e. the comment contains all searched words."""
    return np.asarray(indexedComments) % serachProduct == 0
def findCommentsUsingPrimeFact(indexedComments, index, words):
    """Boolean mask of comments containing all `words`, computed via the
    prime-factorisation encoding."""
    return searchByPrimeFact(indexedComments,
                             convertWordsToProduct(index, words))
# Build the prime encoding once, then time the prime-factorisation search.
[index, indexedComments] = preprocessPrimeComments(dictionary,comments)
start = time.time()
print(str(findCommentsUsingPrimeFact(indexedComments,index,randoWords).sum())+ ' number of records found containing ' + str(randoWords) )
end = time.time()
print(str(end - start)+' seconds')
def timeFunction(func, *params):
    """Run `func(*params)` and return [result, elapsed_seconds].

    Uses time.perf_counter() instead of time.time(): it is monotonic and
    has higher resolution, which matters when timing short intervals.
    """
    start = time.perf_counter()
    output = func(*params)
    return [output, time.perf_counter() - start]
def generateCommentData(dictLength, numOfComments, commentLengthMean, commentLengthDist, graph=False):
    """Seed both RNGs for reproducibility and return a [dictionary,
    comments] pair of synthetic benchmark data."""
    random.seed(50)
    np.random.seed(15)
    words = generateDictionary(dictLength)
    texts = generateComments(numOfComments, words, commentLengthMean,
                             commentLengthDist, graph)
    return [words, texts]
# Benchmark 1: search time vs. number of comments searched.
# The first call draws the comment-length histogram; the second regenerates
# the identical corpus (same fixed seeds) without plotting.
[d,c] = generateCommentData(300,100000,7,1, True)
[d,c] = generateCommentData(300,100000,7,1, False)
wordsToserachFor = pickRandomWordsFromDict(d,3,7,3)
[indx,indxCom] = preprocessPrimeComments(d,c)
stringCompTimings1 = []
primeTimings1 = []
xIncrement1 = []
for n in range(100,100000,1000):
xIncrement1.append(n)
# Time both search strategies over the same n-comment prefix.
[o,t] = timeFunction(numberOfMatches, c[:n], wordsToserachFor)
stringCompTimings1.append(t)
[o,t] = timeFunction(findCommentsUsingPrimeFact, indxCom[:n],indx, wordsToserachFor)
primeTimings1.append(t)
plt.figure()
plt.title("Plot of speed of search based on number of commenst to search through")
plt.ylabel("Time taken (seconds)")
plt.xlabel("Number of comments to search through")
strCompPlot, = plt.plot(xIncrement1,stringCompTimings1, label="Numpy String Compare")
primeFactPlot, = plt.plot(xIncrement1,primeTimings1, label="My Prime Factorisation")
plt.legend(handles=[strCompPlot,primeFactPlot],loc=2)
plt.show()
# Benchmark 2: search time vs. number of words searched for.
# Fix: the original timed the prime method with a stale fixed word list
# (`randWords`) over only 10,000 comments while the string method used
# `randWords2` over 100,000 comments, so the two curves were not comparable.
# Both methods now search the SAME corpus slice with the SAME query words.
stringCompTimings2 = []
primeTimings2 = []
xIncrement2 = []
for n in range(1, 100):
    xIncrement2.append(n)
    randWords2 = pickRandomWordsFromDict(d, 2, 7, n)
    [o, t] = timeFunction(numberOfMatches, c[:100000], randWords2)
    stringCompTimings2.append(t)
    [o, t] = timeFunction(findCommentsUsingPrimeFact, indxCom[:100000], indx, randWords2)
    primeTimings2.append(t)
plt.figure()
plt.title("Plot of speed of search based on number of words searched for")
plt.ylabel("Time taken (seconds)")
plt.xlabel("Number of words searched for")
strCompPlot, = plt.plot(xIncrement2, stringCompTimings2, label="Numpy String Compare")
primeFactPlot, = plt.plot(xIncrement2, primeTimings2, label="My Prime Factorisation")
plt.legend(handles=[strCompPlot, primeFactPlot], loc=5)
plt.show()
# Benchmark 3: search time vs. mean comment length (corpus regenerated per point).
stringCompTimings3 = []
primeTimings3 = []
xIncrement3 = []
for n in range(5,500,10):
xIncrement3.append(n)
[d3,c3] = generateCommentData(30,1000,n,1)
randWords3 = pickRandomWordsFromDict(d3,2,7,4)
[o,t] = timeFunction(numberOfMatches, c3, randWords3)
stringCompTimings3.append(t)
# The prime method also pays the indexing cost implicitly via larger products.
[indexSchema3, indxCom3] = preprocessPrimeComments(d3,c3)
[o,t] = timeFunction(findCommentsUsingPrimeFact, indxCom3,indexSchema3, randWords3)
primeTimings3.append(t)
plt.figure()
plt.title("Plot of speed of search based on length of comments to search through")
plt.ylabel("Time taken (seconds)")
plt.xlabel("Mean length of comments searched through")
strCompPlot, = plt.plot(xIncrement3,stringCompTimings3, label="Numpy String Compare")
primeFactPlot, = plt.plot(xIncrement3,primeTimings3, label="My Prime Factorisation")
plt.legend(handles=[strCompPlot,primeFactPlot],loc=2)
plt.show()
# Benchmark 4: search time vs. dictionary size (larger dictionaries mean larger primes).
stringCompTimings4 = []
primeTimings4 = []
xIncrement4 = []
for n in range(10,1000,10):
xIncrement4.append(n)
[d4,c4] = generateCommentData(n,2000,10,1)
randWords4 = pickRandomWordsFromDict(d4,2,7,3)
[o,t] = timeFunction(numberOfMatches, c4, randWords4)
stringCompTimings4.append(t)
[indexSchema4, indxCom4] = preprocessPrimeComments(d4,c4)
[o,t] = timeFunction(findCommentsUsingPrimeFact, indxCom4,indexSchema4, randWords4)
primeTimings4.append(t)
plt.figure()
plt.title("Plot of speed of search based on length of dictionary")
plt.ylabel("Time taken (seconds)")
plt.xlabel("Number of unique words in text (dictionary length)")
strCompPlot, = plt.plot(xIncrement4,stringCompTimings4, label="Numpy String Compare")
primeFactPlot, = plt.plot(xIncrement4,primeTimings4, label="My Prime Factorisation")
plt.legend(handles=[strCompPlot,primeFactPlot],loc=1)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Execute tests
Step2: Using pytest fixtures
|
<ASSISTANT_TASK:>
Python Code:
# Configure ipytest so pytest can run tests defined inside notebook cells.
import ipytest
ipytest.autoconfig()
# %%ipytest is a cell magic: it executes the cell, then runs pytest on it.
%%ipytest
# define the tests
def test_my_func():
assert my_func(0) == 0
assert my_func(1) == 0
assert my_func(2) == 2
assert my_func(3) == 2
# Rounds x down to the nearest even number.
def my_func(x):
return x // 2 * 2
%%ipytest
import pytest
# Parametrized form of the same test: one case per (input, expected) pair.
@pytest.mark.parametrize('input,expected', [
(0, 0),
(1, 0),
(2, 2),
(3, 2),
])
def test_parametrized(input, expected):
assert my_func(input) == expected
# Fixtures are injected by argument name into any test that requests them.
@pytest.fixture
def my_fixture():
return 42
def test_fixture(my_fixture):
assert my_fixture == 42
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The relevant part of the data we will model looks as follows
Step2: As you can see, we have multiple radon measurements (log-converted to be on the real line) in a county and whether the measurement has been taken in the basement (floor == 0) or on the first floor (floor == 1). Here we want to test the prediction that radon concentrations are higher in the basement.
Step3: Hierarchical Model
Step4: The marginal posteriors in the left column are highly informative. mu_a tells us the group mean (log) radon levels. mu_b tells us that the slope is significantly negative (no mass above zero), meaning that radon concentrations are higher in the basement than first floor. We can also see by looking at the marginals for a that there is quite some differences in radon levels between counties; the different widths are related to how much measurements we have per county, the more, the higher our confidence in that parameter estimate.
Step5: In the above plot we have the data points in black of three selected counties. The thick lines represent the mean estimate of the regression line of the individual (blue) and hierarchical model (in green). The thinner lines are regression lines of individual samples from the posterior and give us a sense of how variable the estimates are.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pymc3 as pm
import pandas as pd
# Minnesota radon dataset: one row per household measurement.
url = "https://github.com/twiecki/WhileMyMCMCGentlySamples/blob/master/content/downloads/notebooks/radon.csv?raw=true"
data = pd.read_csv(url)
county_names = data.county.unique()
# Integer county code per row, used to index per-county parameters later.
county_idx = data["county_code"].values
!pip install arviz
import arviz
data[["county", "log_radon", "floor"]].head()
# Fit an independent (unpooled) linear regression per county: takes about 45 minutes.
indiv_traces = {}
for county_name in county_names:
# Select subset of data belonging to county
c_data = data.loc[data.county == county_name]
c_data = c_data.reset_index(drop=True)
c_log_radon = c_data.log_radon
c_floor_measure = c_data.floor.values
with pm.Model() as individual_model:
# Intercept prior
a = pm.Normal("alpha", mu=0, sigma=1)
# Slope prior
b = pm.Normal("beta", mu=0, sigma=1)
# Model error prior
eps = pm.HalfCauchy("eps", beta=1)
# Linear model
radon_est = a + b * c_floor_measure
# Data likelihood
y_like = pm.Normal("y_like", mu=radon_est, sigma=eps, observed=c_log_radon)
# Inference button (TM)!
trace = pm.sample(progressbar=False)
indiv_traces[county_name] = trace
# Partial-pooling model: county intercepts/slopes share group-level hyperpriors.
with pm.Model() as hierarchical_model:
# Hyperpriors
mu_a = pm.Normal("mu_alpha", mu=0.0, sigma=1)
sigma_a = pm.HalfCauchy("sigma_alpha", beta=1)
mu_b = pm.Normal("mu_beta", mu=0.0, sigma=1)
sigma_b = pm.HalfCauchy("sigma_beta", beta=1)
# Intercept for each county, distributed around group mean mu_a
a = pm.Normal("alpha", mu=mu_a, sigma=sigma_a, shape=len(data.county.unique()))
# Slope for each county, distributed around group mean mu_b
b = pm.Normal("beta", mu=mu_b, sigma=sigma_b, shape=len(data.county.unique()))
# Model error
eps = pm.HalfCauchy("eps", beta=1)
# Expected value
radon_est = a[county_idx] + b[county_idx] * data.floor.values
# Data likelihood
y_like = pm.Normal("y_like", mu=radon_est, sigma=eps, observed=data.log_radon)
with hierarchical_model:
hierarchical_trace = pm.sample()
pm.traceplot(hierarchical_trace);
pm.traceplot(hierarchical_trace, var_names=["alpha", "beta"])
# For three counties, overlay posterior regression lines from the unpooled
# (blue) and hierarchical (green) models on the raw measurements (black).
selection = ["CASS", "CROW WING", "FREEBORN"]
fig, axis = plt.subplots(1, 3, figsize=(12, 6), sharey=True, sharex=True)
axis = axis.ravel()
for i, c in enumerate(selection):
c_data = data.loc[data.county == c]
c_data = c_data.reset_index(drop=True)
z = list(c_data["county_code"])[0]
xvals = np.linspace(-0.2, 1.2)
# Thin lines: individual posterior samples (every 10th draw) to show uncertainty.
for a_val, b_val in zip(indiv_traces[c]["alpha"][::10], indiv_traces[c]["beta"][::10]):
axis[i].plot(xvals, a_val + b_val * xvals, "b", alpha=0.05)
axis[i].plot(
xvals,
indiv_traces[c]["alpha"][::10].mean() + indiv_traces[c]["beta"][::10].mean() * xvals,
"b",
alpha=1,
lw=2.0,
label="individual",
)
for a_val, b_val in zip(hierarchical_trace["alpha"][::10][z], hierarchical_trace["beta"][::10][z]):
axis[i].plot(xvals, a_val + b_val * xvals, "g", alpha=0.05)
axis[i].plot(
xvals,
hierarchical_trace["alpha"][::10][z].mean() + hierarchical_trace["beta"][::10][z].mean() * xvals,
"g",
alpha=1,
lw=2.0,
label="hierarchical",
)
# Small horizontal jitter so overlapping basement/first-floor points are visible.
axis[i].scatter(
c_data.floor + np.random.randn(len(c_data)) * 0.01,
c_data.log_radon,
alpha=1,
color="k",
marker=".",
s=80,
label="original data",
)
axis[i].set_xticks([0, 1])
axis[i].set_xticklabels(["basement", "first floor"])
axis[i].set_ylim(-1, 4)
axis[i].set_title(c)
if not i % 3:
axis[i].legend()
axis[i].set_ylabel("log radon level")
# Shrinkage plot: arrows show how each county's (intercept, slope) estimate
# moves from the unpooled fit toward the hierarchical (partially pooled) fit.
hier_a = hierarchical_trace["alpha"].mean(axis=0)
hier_b = hierarchical_trace["beta"].mean(axis=0)
indv_a = [indiv_traces[c]["alpha"].mean() for c in county_names]
indv_b = [indiv_traces[c]["beta"].mean() for c in county_names]
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(
111,
xlabel="Intercept",
ylabel="Floor Measure",
title="Hierarchical vs. Non-hierarchical Bayes",
xlim=(0.25, 2),
ylim=(-2, 1.5),
)
ax.scatter(indv_a, indv_b, s=26, alpha=0.4, label="non-hierarchical")
ax.scatter(hier_a, hier_b, c="red", s=26, alpha=0.4, label="hierarchical")
for i in range(len(indv_b)):
ax.arrow(
indv_a[i],
indv_b[i],
hier_a[i] - indv_a[i],
hier_b[i] - indv_b[i],
fc="k",
ec="k",
length_includes_head=True,
alpha=0.4,
head_width=0.02,
)
ax.legend();
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hard to see whether it is still learning...
|
<ASSISTANT_TASK:>
Python Code:
# Plot monitored training channels from a pylearn2 model's monitor.
# `hl` is presumably holoviews (Curve elements) — TODO confirm against earlier cells.
channel = m.monitor.channels["valid_y_nll"]
hl.Curve(zip(channel.epoch_record, channel.val_record),label="valid_y_nll")
channel = m.monitor.channels["valid_y_nll"]
plt.plot(channel.epoch_record, channel.val_record)
# Zoom in on the last 40 epochs to see whether the model is still improving.
ch1 = m.monitor.channels["valid_y_nll"]
ch2 = m.monitor.channels["train_y_nll"]
hl.Curve(zip(ch1.epoch_record[-40:], ch1.val_record[-40:]),label="valid_y_nll")
hl.Curve(zip(ch2.epoch_record[-40:], ch2.val_record[-40:]),label="train_y_nll")
# Load a different checkpoint and plot its objective/NLL curves.
m = pylearn2.utils.serial.load(
"/disk/scratch/neuroglycerin/models/continue_hopeful_recent.pkl")
make_curves(m,"valid_objective","valid_y_nll","train_y_nll")
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Batch download the data
Step2: Compare to a Gaia full catalog source (download from previous notebook or manually)
Step3: 2. Compare TGAS and GaiaSource files
Step4: TGAS is just the subset with parallaxes available, while Gaia source has only Positions and Magnitudes but for billions of sources
Step5: For a single file, TGAS covers much more area. The file sizes are capped at 40 Mb.
Step6: 3. Explore!
Step7: This takes a finite amount of RAM, but should be fine for modern laptops.
Step8: So 2.05+ million sources with 59 "features" or columns of metadata.
Step9: Only one variable star of all the TGAS sample, about what you'd expect.
|
<ASSISTANT_TASK:>
Python Code:
#! cat /Users/gully/.ipython/profile_default/startup/start.ipy
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
import os
# Download the 16 TGAS csv.gz chunks from the Gaia archive (active: this runs wget).
for i in range(16):
fn = 'http://cdn.gea.esac.esa.int/Gaia/tgas_source/csv/TgasSource_000-000-{:03d}.csv.gz'.format(i)
executable = 'wget '+fn
print(executable)
os.system(executable) # runs the download; comment this line out to skip
#! mv Tgas* ../data
#! gzip -d ../data/Tgas*
! ls ../data/Tgas*
#! wget http://cdn.gea.esac.esa.int/Gaia/gaia_source/csv/GaiaSource_000-000-000.csv.gz
#! mv GaiaSource_000-000-000.csv.gz ../data/
! ls ../data/GaiaSource*
import pandas as pd
%time t000 = pd.read_csv('../data/TgasSource_000-000-000.csv')
%time g000 = pd.read_csv('../data/GaiaSource_000-000-000.csv')
# Columns present in TGAS but not in the full Gaia source table.
set(t000.columns) - set(g000.columns)
len(t000), len(g000)
# x == x is False only for NaN, so this keeps rows with a measured parallax.
p_i = t000.parallax == t000.parallax
tp000 = t000[p_i]
p_i.sum()
p_i = g000.parallax == g000.parallax
gp000 = g000[p_i]
p_i.sum()
sns.set_color_codes()
plt.plot(tp000.ra[0:2], tp000.dec[0:2], 'b.', label='TGAS') # Hack to get bold labels
plt.plot(gp000.ra[0:2], gp000.dec[0:2], 'r.', label='Gaia Source')
plt.plot(tp000.ra.values, tp000.dec.values, 'b.', alpha=0.1)
plt.plot(gp000.ra.values, gp000.dec.values, 'r.', alpha=0.1)
plt.legend(loc='lower left')
# Concatenate all 16 TGAS chunks into one DataFrame.
df_list = []
for i in range(16):
df_list.append(pd.read_csv('../data/TgasSource_000-000-{:03d}.csv'.format(i)))
tt = pd.concat(df_list, ignore_index=True)
t000.shape
tt.shape
len(tt.source_id.unique())
plt.plot(tt.parallax, tt.parallax_error, '.', alpha=0.005)
plt.xscale('log')
bins = np.arange(-50, 200, 3)
sns.distplot(tt.parallax, bins=bins,kde=False)
plt.yscale('log')
sns.distplot(tt.parallax_error)
bins = np.arange(0, 160, 2)
sns.distplot(tt.astrometric_n_obs_ac, bins=bins, kde=False)
sns.distplot(tt.astrometric_n_bad_obs_ac, bins=bins, kde=False)
sns.distplot(tt.astrometric_n_good_obs_ac, bins=bins, kde=False)
sns.distplot(tt.phot_g_mean_mag)
bins = np.arange(0,40,1)
sns.distplot(tt.matched_observations, bins=bins,kde=False)
tt.iloc[0]
# Same NaN-filter idiom for astrometric_delta_q.
gi = tt.astrometric_delta_q == tt.astrometric_delta_q
bins= np.arange(0,500, 5)
sns.distplot(tt.astrometric_delta_q[gi], bins=bins, kde=False)
plt.yscale('log')
tt.phot_variable_flag.unique()
vi = tt.phot_variable_flag == 'VARIABLE'
vi.sum(), len(vi)
tt[vi]
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Pre-process and vectorize the documents
Step2: We use the WordNet lemmatizer from NLTK. A lemmatizer is preferred over a stemmer in this case because it produces more readable words. Output that is easy to read is very desirable in topic modelling.
Step3: We find bigrams in the documents. Bigrams are sets of two adjacent words. Using bigrams we can get phrases like "machine_learning" in our output (spaces are replaced with underscores); without bigrams we would only get "machine" and "learning".
Step4: We remove rare words and common words based on their document frequency. Below we remove words that appear in less than 20 documents or in more than 50% of the documents. Consider trying to remove words only based on their frequency, or maybe combining that with this approach.
Step5: Finally, we transform the documents to a vectorized form. We simply compute the frequency of each word, including the bigrams.
Step6: Let's see how many tokens and documents we have to train on.
Step7: Training
Step8: We can compute the topic coherence of each topic. Below we display the average topic coherence and print the topics in order of topic coherence.
|
<ASSISTANT_TASK:>
Python Code:
# Read data.
import os
# Folder containing all NIPS papers.
data_dir = 'nipstxt/'
# Folders containing individual NIPS papers.
yrs = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
dirs = ['nips' + yr for yr in yrs]
# Read all texts into a list.
docs = []
for yr_dir in dirs:
files = os.listdir(data_dir + yr_dir)
for filen in files:
# Note: ignoring characters that cause encoding errors.
with open(data_dir + yr_dir + '/' + filen, errors='ignore') as fid:
txt = fid.read()
docs.append(txt)
# Tokenize the documents.
from nltk.tokenize import RegexpTokenizer
# Split the documents into tokens.
tokenizer = RegexpTokenizer(r'\w+')
for idx in range(len(docs)):
docs[idx] = docs[idx].lower() # Convert to lowercase.
docs[idx] = tokenizer.tokenize(docs[idx]) # Split into words.
# Remove numbers, but not words that contain numbers.
docs = [[token for token in doc if not token.isnumeric()] for doc in docs]
# Remove words that are only one character.
docs = [[token for token in doc if len(token) > 1] for doc in docs]
# Lemmatize the documents.
from nltk.stem.wordnet import WordNetLemmatizer
# Lemmatize all words in documents.
lemmatizer = WordNetLemmatizer()
docs = [[lemmatizer.lemmatize(token) for token in doc] for doc in docs]
# Compute bigrams.
from gensim.models import Phrases
# Add bigrams and trigrams to docs (only ones that appear 20 times or more).
bigram = Phrases(docs, min_count=20)
for idx in range(len(docs)):
for token in bigram[docs[idx]]:
if '_' in token:
# Token is a bigram, add to document.
docs[idx].append(token)
# Remove rare and common tokens.
from gensim.corpora import Dictionary
# Create a dictionary representation of the documents.
dictionary = Dictionary(docs)
# Filter out words that occur in less than 20 documents, or more than 50% of the documents.
dictionary.filter_extremes(no_below=20, no_above=0.5)
# Vectorize data.
# Bag-of-words representation of the documents.
corpus = [dictionary.doc2bow(doc) for doc in docs]
print('Number of unique tokens: %d' % len(dictionary))
print('Number of documents: %d' % len(corpus))
# Train LDA model.
from gensim.models import LdaModel
# Set training parameters.
num_topics = 10
chunksize = 2000
passes = 20
iterations = 400
eval_every = None # Don't evaluate model perplexity, takes too much time.
# Make a index to word dictionary.
temp = dictionary[0] # This is only to "load" the dictionary.
id2word = dictionary.id2token
%time model = LdaModel(corpus=corpus, id2word=id2word, chunksize=chunksize, \
alpha='auto', eta='auto', \
iterations=iterations, num_topics=num_topics, \
passes=passes, eval_every=eval_every)
top_topics = model.top_topics(corpus, num_words=20)
# Average topic coherence is the sum of topic coherences of all topics, divided by the number of topics.
avg_topic_coherence = sum([t[1] for t in top_topics]) / num_topics
print('Average topic coherence: %.4f.' % avg_topic_coherence)
from pprint import pprint
pprint(top_topics)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's go over the columns
Step2: <a id='pipeline'></a>
Step5: Filtering out ANNOUNCED targets
Step9: Filtering out PROPOSED targets
|
<ASSISTANT_TASK:>
Python Code:
# import the dataset
from quantopian.interactive.data.eventvestor import mergers_and_acquisitions_free as dataset
# or if you want to import the free dataset, use:
#from quantopian.data.eventvestor import buyback_auth_free
# import data operations
from odo import odo
# import other libraries we will use
import pandas as pd
import matplotlib.pyplot as plt
# Let's use blaze to understand the data a bit using Blaze dshape()
dataset.dshape
# And how many rows are there?
# N.B. we're using a Blaze function to do this, not len()
dataset.count()
dataset.asof_date.min()
# Let's see what the data looks like. We'll grab the first three rows.
dataset[:3]
dataset.is_crossboarder.distinct()
# get the sid for MSFT
symbols('MSFT')
# knowing that the MSFT sid is 5061:
msft = dataset[dataset.sid==5061]
msft[:5]
# Import necessary Pipeline modules
from quantopian.pipeline import Pipeline
from quantopian.research import run_pipeline
from quantopian.pipeline.factors import AverageDollarVolume
from quantopian.pipeline.classifiers.eventvestor import (
AnnouncedAcqTargetType,
ProposedAcqTargetType,
)
from quantopian.pipeline.factors.eventvestor import (
BusinessDaysSinceAnnouncedAcquisition,
BusinessDaysSinceProposedAcquisition
)
from quantopian.pipeline.filters.eventvestor import (
IsAnnouncedAcqTarget
)
from quantopian.pipeline import Pipeline
from quantopian.research import run_pipeline
def screen_ma_targets_by_type(target_type='cash'):
    """Build a pipeline filter that screens OUT announced M&A targets.

    target_type:
        (string) Available options are 'cash', 'stock', 'mixed', 'all'.
        This will filter all offers of type target_type.
    """
    if target_type == 'all':
        # Exclude every security that is an announced acquisition target.
        return (~IsAnnouncedAcqTarget())
    else:
        if target_type == 'cash':
            filter_offer = 'Cash Offer'
        elif target_type == 'stock':
            filter_offer = 'Stock Offer'
        elif target_type == 'mixed':
            filter_offer = 'Mixed Offer'
        # Exclude only targets whose announced offer matches the given type.
        return (~AnnouncedAcqTargetType().eq(filter_offer))
def screen_ma_targets_by_days(days=200):
    """Build a filter excluding securities with a recent M&A announcement.

    days:
        (int) Securities that have had an acquisition announcement less
        than `days` business days ago are filtered out; securities with
        no announcement at all (null) pass the filter.
    """
    b_days = BusinessDaysSinceAnnouncedAcquisition()
    return ((b_days > days) | b_days.isnull())
# Build a pipeline keeping only securities with no stock-offer announcement
# in the last 60 business days, and run it for a single day.
pipe = Pipeline(
columns={
'AnnouncedAcqTargetType': AnnouncedAcqTargetType(),
'BusinessDays': BusinessDaysSinceAnnouncedAcquisition()
},
screen=(screen_ma_targets_by_days(60) &
screen_ma_targets_by_type(target_type='stock'))
)
output = run_pipeline(pipe, start_date='2016-07-28', end_date='2016-07-28')
# Similar functions for M&A Proposals (different from Announcements)
def screen_ma_proposal_targets_by_type(target_type='cash'):
    """Screen out proposed (not yet announced) M&A targets by offer type.

    target_type:
        (string) Available options are 'cash', 'stock', 'mixed', 'all'.
        This will filter all offers of type target_type.
    """
    if target_type == 'all':
        # Keep only securities with no proposal on record at all.
        return (ProposedAcqTargetType().isnull() &
                BusinessDaysSinceProposedAcquisition().isnull())
    if target_type == 'cash':
        filter_offer = 'Cash Offer'
    elif target_type == 'stock':
        filter_offer = 'Stock Offer'
    elif target_type == 'mixed':
        filter_offer = 'Mixed Offer'
    return (~ProposedAcqTargetType().eq(filter_offer))
def screen_ma_proposal_targets_by_days(days=200):
    """Build a filter excluding securities with a recent acquisition proposal.

    days:
        (int) Securities that have had a proposal less than `days`
        business days ago are filtered out; securities with no proposal
        (null) pass the filter.
    """
    b_days = BusinessDaysSinceProposedAcquisition()
    return ((b_days > days) | b_days.isnull())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 그러나 NumPy는 벡터화 연산을 지원하므로 다음과 같이 덧셈 연산 하나로 끝난다. 위에서 보인 선형 대수의 벡터 기호를 사용한 연산과 코드가 완전히 동일하다.
Step2: 연산 속도도 벡터화 연산이 훨씬 빠른 것을 볼 수 있다.
Step3: NumPy 곱셉의 경우에는 행렬의 곱, 즉 내적(inner product, dot product)의 정의와 다르다. 따라서 이 경우에는 별도로 dot이라는 명령 혹은 메서드를 사용해야 한다.
Step4: 비교 연산도 마찬가지로 element-wise 연산이다. 따라서 벡터 혹은 행렬 전체의 원소가 모두 같아야 하는 선형 대수의 비교 연산과는 다르다.
Step5: 만약 배열 전체를 비교하고 싶다면 array_equal 명령을 사용한다.
Step6: 만약 NumPy 에서 제공하는 지수 함수, 로그 함수 등의 수학 함수를 사용하면 element-wise 벡터화 연산을 지원한다.
Step7: 1 + 2
Step8: 만약 NumPy에서 제공하는 함수를 사용하지 않으면 벡터화 연산은 불가능하다.
Step9: 브로드캐스팅
Step10: 브로드캐스팅은 더 차원이 높은 경우에도 적용된다. 다음 그림을 참조하라.
Step11: 차원 축소 연산
Step12: 연산의 대상이 2차원 이상인 경우에는 어느 차원으로 계산을 할 지를 axis 인수를 사용하여 지시한다. axis=0인 경우는 행 연산, axis=1인 경우는 열 연산 등으로 사용한다. 디폴트 값은 0이다.
Step13: axis=-1
Step14: 정렬
Step15: sort 메서드는 해당 객체의 자료 자체가 변화하는 in-place 메서드이므로 사용할 때 주의를 기울여야 한다.
Step16: 만약 자료를 정렬하는 것이 아니라 순서만 알고 싶다면 argsort 명령을 사용한다.
|
<ASSISTANT_TASK:>
Python Code:
# NumPy vectorized-operations walkthrough (comments translated from Korean).
x = np.arange(1, 101)
x
y = np.arange(101, 201)
y
%%time # the C-style way: explicit element-by-element loop
z = np.zeros_like(x)
for i, (xi, yi) in enumerate(zip(x, y)):
z[i] = xi + yi
z
%%time
z = x + y
z
x = np.arange(10)
x
a = 100
a * x
x = np.arange(10)
y = np.arange(10)
x * y
# Inner (dot) product, unlike the element-wise * above.
np.dot(x, y)
x.dot(y)
a = np.array([1, 2, 3, 4])
b = np.array([4, 2, 2, 4])
a == b
a >= b
a = np.array([1, 2, 3, 4])
b = np.array([4, 2, 2, 4])
c = np.array([1, 2, 3, 4])
np.array_equal(a, b) # fast: the loop runs internally (in C)
np.array_equal(a, c)
a = np.arange(5)
a
np.exp(a)
10**a
np.log(a)
np.log10(a) # common (base-10) logarithm
import math
a = [1, 2, 3]
math.exp(a) # raises: math.exp takes a scalar; a for loop would be needed here
x = np.arange(5)
y = np.ones_like(x)
x + y
x + 1
a = np.tile(np.arange(0, 40, 11), (3, 1)).T # what is (3, 1)? => (0, 40, 11) supplies the values; 3 and 1 are the tiling repeats, then .T transposes
a
a1 = np.tile(np.arange(0, 40, 10), (2, 2)).T # what does this tiling shape produce?
a1
b = np.array([0, 1, 2])
b
a + b
a = np.arange(0, 40, 10)[:, np.newaxis] # add an axis to make a column vector
a
a + b
x = np.array([1, 2, 3, 4])
x
np.sum(x)
x.sum() # available both as a function and as a method
x = np.array([1, 3, 2])
x.min()
x.max()
x.argmin() # index of minimum
x.argmax() # index of maximum
x = np.array([1, 2, 3, 1])
x.mean() # mean
np.median(x) # median (e.g. the 50th score among 100 people)
np.all([True, True, False]) # mostly used with boolean arrays
np.any([True, True, False]) # True if at least one element is truthy
# NOTE(review): np.int is removed in NumPy 1.24+; use int or np.int_ instead.
a = np.zeros((100, 100), dtype=np.int)
a
np.any(a != 0) # is any element nonzero?
np.all(a == a) # similar to np.array_equal
a = np.array([1, 2, 3, 2])
b = np.array([2, 2, 3, 2])
c = np.array([6, 4, 4, 5])
((a <= b) & (b <= c)).all() # all() can also be called as a method
x = np.array([[1, 1], [2, 2]])
x
x.sum()
x.sum(axis=0) # columns (first dimension): collapse axis 0, one result per column
x.sum(axis=1) # rows (second dimension)
y = np.array([[1, 2, 3], [5, 6, 1]])
np.median(y, axis=-1) # last axis
np.median(y, axis=0)
a = np.array([[4, 3, 5], [1, 2, 1]])
a
np.sort(a)
np.sort(a, axis=0)
np.sort(a, axis=1)
# In-place: a.sort mutates a, unlike np.sort which returns a copy.
a.sort(axis=1)
a
a = np.array([4, 3, 1, 2])
j = np.argsort(a)
j
a[j] # fancy indexing with argsort recovers the sorted values
a = np.array([80, 72, 99, 35])
j = np.sort(a)
j
j1 = np.argsort(a)
j1
a = np.array([ 72, 99, 35])
j = np.argsort(a)
j
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Writing and running programs in TensorFlow has the following steps
Step2: As expected, you will not see 20! You got a tensor saying that the result is a tensor that does not have the shape attribute, and is of type "int32". All you did was put in the 'computation graph', but you have not run this computation yet. In order to actually multiply the two numbers, you will have to create a session and run it.
Step3: To summarize, remember to initialize your variables, create a session and run the operations inside the session.
Step4: When we first defined x we did not have to specify a value for it. A placeholder is simply a variable that you will assign data to later when running the session. We say that you feed data to these placeholders when running the session.
Step6: Sigmoid in TensorFlow
Step8: Computing the Cost
Step10: One Hot Encodings
Step11: Building a Neural Network with TensorFlow
Step13: So, what does this mean? In our data set, there are 55,000 examples of handwritten digits from zero to nine. Each example is a 28x28 pixel image flattened into an array of 784 values, one per pixel's intensity.
Step14: Note
Step16: Initialize Parameters
Step18: As expected, the parameters haven't been evaluated yet.
Step20: Compute Cost
Step23: Backward propagation & parameter updates
|
<ASSISTANT_TASK:>
Python Code:
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.examples.tutorials.mnist import input_data
%matplotlib inline
# TF1 builds a computation graph first; nothing is evaluated until a Session runs it.
a = tf.constant(2)
b = tf.constant(10)
c = tf.multiply(a,b)
print(c) #Question: What should the output be? (A Tensor object, not 20.)
sess = tf.Session()
print(sess.run(c))
# A placeholder receives its concrete value only at run time, via feed_dict.
x = tf.placeholder(tf.int64, name = 'x')
print(sess.run(2 * x, feed_dict = {x: 3}))
sess.close()
# Y = WX + b with random constants: one dense linear transform evaluated once.
X = tf.constant(np.random.randn(3,1), name="X")
W = tf.constant(np.random.randn(4,3), name="W")
b = tf.constant(np.random.randn(4,1), name="b")
Y = tf.add(tf.matmul(W, X), b)
sess = tf.Session()
result = sess.run(Y)
sess.close()
print("Result = " + str(result))
def sigmoid(z):
    """
    Computes the sigmoid of z

    Arguments:
    z -- input value, scalar or vector

    Returns:
    result -- the sigmoid of z
    """
    # TF1 deferred-execution style: declare a placeholder, wire up the op,
    # then evaluate it inside a session with the concrete value fed in.
    x = tf.placeholder(tf.float32, name="x")
    sigmoid = tf.sigmoid(x)
    with tf.Session() as sess:
        result = sess.run(sigmoid, feed_dict={x: z})
    return result
# Sanity checks: sigmoid(0) = 0.5, sigmoid(12) is very close to 1.
print ("sigmoid(0) = " + str(sigmoid(0)))
print ("sigmoid(12) = " + str(sigmoid(12)))
def cost(logits, labels):
    """
    Computes the cost using the sigmoid cross entropy

    Arguments:
    logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)
    labels -- vector of labels y (1 or 0)

    Returns:
    cost -- the per-element sigmoid cross-entropy cost
    """
    z = tf.placeholder(tf.float32, name="z")
    y = tf.placeholder(tf.float32, name="y")
    # Combines the sigmoid and the cross-entropy in one numerically stable op.
    cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)
    sess = tf.Session()
    cost = sess.run(cost, feed_dict={z: logits, y: labels})
    sess.close()
    return cost
logits = sigmoid(np.array([0.2,0.4,0.7,0.9]))
# NOTE(review): this rebinds the name `cost` from the function to its result,
# so the cell cannot be re-run without redefining the function.
cost = cost(logits, np.array([0,0,1,1]))
print ("cost = " + str(cost))
def one_hot_matrix(labels, C):
    """
    Creates a matrix where the i-th row corresponds to the ith class number and the jth column
    corresponds to the jth training example. So if example j had a label i. Then entry (i,j)
    will be 1.

    Arguments:
    labels -- vector containing the labels
    C -- number of classes, the depth of the one hot dimension

    Returns:
    one_hot -- one hot matrix
    """
    C = tf.constant(C, name="C")
    # Third positional argument is on_value=1; off entries default to 0.
    one_hot_matrix = tf.one_hot(labels, C, 1)
    sess = tf.Session()
    one_hot = sess.run(one_hot_matrix)
    sess.close()
    return one_hot
# Encode six labels over 4 classes; expect a 6x4 one-hot matrix.
labels = np.array([1,2,3,0,2,1])
one_hot = one_hot_matrix(labels, C = 4)
print ("one_hot = " + str(one_hot))
# Load MNIST with labels one-hot encoded; images arrive already flattened to 784.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
X_train = mnist.train.images
Y_train = mnist.train.labels
X_test = mnist.test.images
Y_test = mnist.test.labels
print ("Number of training examples = " + str(X_train.shape[0]))
print ("Number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
def create_placeholders(num_features, num_classes):
    """
    Creates the placeholders for the tensorflow session.

    Arguments:
    num_features -- scalar, size of an image vector (num_px * num_px = 28 * 28 = 784)
    num_classes -- scalar, number of classes (from 0 to 9, so -> 10)

    Returns:
    X -- placeholder for the data input, of shape [None, num_features] and dtype "float"
    Y -- placeholder for the input labels, of shape [None, num_classes] and dtype "float"
    """
    # None leaves the batch dimension free, so the same graph serves any batch size.
    X = tf.placeholder(tf.float32, shape=[None, num_features])
    Y = tf.placeholder(tf.float32, shape=[None, num_classes])
    return X, Y
X, Y = create_placeholders(X_train.shape[1], Y_train.shape[1])
print ("X = " + str(X))
print ("Y = " + str(Y))
def initialize_parameters(num_features, num_classes):
    """
    Initializes parameters to build a neural network with tensorflow.

    Arguments:
    num_features -- scalar, size of an image vector (num_px * num_px = 28 * 28 = 784)
    num_classes -- scalar, number of classes (from 0 to 9, so -> 10)

    Returns:
    parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
    """
    # Xavier initialization for the weights keeps activation variance stable;
    # biases start at zero. Hidden layer sizes: 25 -> 12 -> num_classes.
    W1 = tf.get_variable("W1", [num_features, 25], initializer=tf.contrib.layers.xavier_initializer())
    b1 = tf.get_variable("b1", [1, 25], initializer=tf.zeros_initializer())
    W2 = tf.get_variable("W2", [25, 12], initializer=tf.contrib.layers.xavier_initializer())
    b2 = tf.get_variable("b2", [1, 12], initializer=tf.zeros_initializer())
    W3 = tf.get_variable("W3", [12, num_classes], initializer=tf.contrib.layers.xavier_initializer())
    b3 = tf.get_variable("b3", [1, num_classes], initializer=tf.zeros_initializer())
    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2,
                  "W3": W3,
                  "b3": b3}
    return parameters
# Inspect the (unevaluated) variable tensors inside a session.
with tf.Session() as sess:
parameters = initialize_parameters(X_train.shape[1], Y_train.shape[1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
def forward_propagation(X, parameters):
Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX
Arguments:
X -- input dataset placeholder, of shape (Number of examples, number of features)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
the shapes are given in initialize_parameters
Returns:
Z3 -- the output of the last LINEAR unit
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
W3 = parameters['W3']
b3 = parameters['b3']
# Numpy Equivalents:
Z1 = tf.add(tf.matmul(X, W1), b1) # Z1 = np.dot(X, W1) + b1
A1 = tf.nn.relu(Z1) # A1 = relu(Z1)
Z2 = tf.add(tf.matmul(A1, W2), b2) # Z2 = np.dot(A1, W2) + b2
A2 = tf.nn.relu(Z2) # A2 = relu(Z2)
Z3 = tf.add(tf.matmul(A2, W3), b3) # Z3 = np.dot(A2, W3) + b3
return Z3
def compute_cost(Z3, Y):
Computes the cost
Arguments:
Z3 -- output of forward propagation (output of the last LINEAR unit)
Y -- "true" labels vector placeholder, same shape as Z3
Returns:
cost - Tensor of the cost function
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = Z3, labels = Y))
return cost
def random_mini_batches(X, Y, mini_batch_size):
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data
Y -- true "label" vector
mini_batch_size - size of the mini-batches, integer
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
m = X.shape[0] # number of training examples
mini_batches = []
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[permutation, :]
shuffled_Y = Y[permutation, :]
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size, :]
mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size, :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m, :]
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m, :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,
num_epochs = 200, minibatch_size = 32, print_cost = True):
Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.
Arguments:
X_train -- training set features
Y_train -- training set class values
X_test -- test set features
Y_test -- test set class values
learning_rate -- learning rate of the optimization
num_epochs -- number of epochs of the optimization loop
minibatch_size -- size of a minibatch
print_cost -- True to print the cost every 100 epochs
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables
(m, num_features) = X_train.shape # (m : number of examples in the train set, n_features: input size)
num_classes = Y_train.shape[1] # n_classes : output size
costs = [] # To keep track of the cost
# Create placeholders
X, Y = create_placeholders(num_features, num_classes)
# Initialize parameters
parameters = initialize_parameters(num_features, num_classes)
# Forward propagation: Build the forward propagation in the tensorflow graph
Z3 = forward_propagation(X, parameters)
# Cost function: Add cost function to tensorflow graph
cost = compute_cost(Z3, Y)
# Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
# Initialize all the variables
init = tf.global_variables_initializer()
# Start the session to compute the tensorflow graph
with tf.Session() as sess:
# Run the initialization
sess.run(init)
# Do the training loop
for epoch in range(num_epochs):
epoch_cost = 0. # Defines a cost related to an epoch
num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
minibatches = random_mini_batches(X_train, Y_train, minibatch_size)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# IMPORTANT: The line that runs the graph on a minibatch.
# Run the session to execute the "optimizer" and the "cost", the feedict should contain a minibatch for (X,Y).
_ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
epoch_cost += minibatch_cost / num_minibatches
# Print the cost every epoch
if print_cost == True and epoch % 10 == 0:
print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
if print_cost == True and epoch % 5 == 0:
costs.append(epoch_cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
# lets save the parameters in a variable
parameters = sess.run(parameters)
print ("Parameters have been trained!")
# Calculate the correct predictions
correct_prediction = tf.equal(tf.argmax(Z3,1), tf.argmax(Y,1))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))
return parameters
parameters = model(X_train, Y_train, X_test, Y_test)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Zunächst werden die Daten geladen.
Step2: So sehen die Daten aus
Step3: Sehen wir uns die Verteilung der Klassen an
Step4: Wenn die Klassen vorher auf 0 und 1 rediziert werden, sieht das Ergebnis etwas anders aus
Step5: Als naechstes soll die Klassenhierarchie betrachtet werden. Sehen wir uns zunaechst die Surchschnittliche Summe der Klassenlabels innerhalb der Oberklassen an.
Step6: Betrachten wir nun Korrelationen zwischen Klassen. Dazu werden die summierten Abstände zwischen den Werten von jeweils zwei Klassen aufgetragen. Blau
|
<ASSISTANT_TASK:>
Python Code:
from tweet import Tweet
import numpy as np
from csv_handling import load_tweet_csv
import matplotlib.pyplot as plt
tweets = load_tweet_csv("train.csv", use_pickle=False, use_cache=False)
len(tweets)
tweets[:10]
[t["tweet"] for t in tweets[:5]]
fig, ax = plt.subplots()
classes = Tweet.get_all_keys()
y_pos = np.arange(len(classes))
classes_sum = [0]*len(classes)
for tweet in tweets:
for i in range(len(classes)):
classes_sum[i] += tweet[classes[i]]
ax.barh(y_pos, classes_sum, align='center',
color='green', ecolor='black')
ax.set_yticks(y_pos)
ax.set_yticklabels(classes)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel('Summe aller vergebenen Labelwahrscheinlichkeiten')
ax.set_title('Vorkommen der einzelnen Klassen')
plt.show()
fig, ax = plt.subplots()
classes_bin_sum = [0] * len(classes)
for tweet in tweets:
for i in range(len(classes)):
classes_bin_sum[i] += 1 if tweet[classes[i]] > 0.5 else 0
ax.barh(y_pos, classes_bin_sum, align='center',
color='green', ecolor='black')
ax.set_yticks(y_pos)
ax.set_yticklabels(classes)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel('Summe aller gerundeten vergebenen Labelwahrscheinlichkeiten')
ax.set_title('Vorkommen der einzelnen Klassen, binaer betrachtet')
plt.show()
fig, ax = plt.subplots()
class_groups = ["k", "s", "w"]
y_pos = np.arange(len(class_groups))
main_classes_sum = [0] * len(class_groups)
for tweet in tweets:
for tweet_class in classes:
main_classes_sum[class_groups.index(tweet_class[0])] += tweet[tweet_class]
main_classes_sum = [grp/len(tweets) for grp in main_classes_sum]
ax.barh(y_pos, main_classes_sum, align='center',
color='green', ecolor='black')
ax.set_yticks(y_pos)
ax.set_yticklabels(class_groups)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel('Druchschnittliche Summe der Labels pro Oberklasse')
ax.set_title('Klassenhierarchie, Summe der Unterklassen')
plt.show()
fig, ax = plt.subplots(figsize=(8, 8))
classes = Tweet.get_all_keys()
y_pos = np.arange(len(classes))
error = np.full((len(classes), len(classes)), 0, dtype=int)
for tweet in tweets[:100]:
for i, class_i in enumerate(classes):
for j, class_j in enumerate(classes):
error[i, j] += abs((tweet[class_i] - tweet[class_j]))
ax.set_yticks(y_pos)
ax.set_yticklabels(classes)
ax.set_xticks(y_pos)
ax.set_xticklabels(classes)
ax.set_title('Korrelation zwischen Klassen')
plt.imshow(error, interpolation='nearest')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Partie 0 - modules recommandés et connexion à l'API
Step2: Partie 1 - Trouver les gares accessibles via la SNCF
Step3: Les trajets depuis la Gare de Lyon
Step4: Et l'inverse
Step5: Question 2
Step6: Quels sont les horaires de départ de ces trains ?
Step7: Parmi ces trains, combien de trains ont pour destination finale Lyon et qui partent le 17 novembre ?
Step8: C'est quand qu'on va où ?
Step9: Et les correspondances ?
Step10: Exemple
Step11: Exemple
Step12: Question 5
Step13: On représente tout ça sur un graphique
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
# !!!!! Attention à bien mettre votre token ici !!!!!
token_auth = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
import keyring, os
if "XXXXXX" in token_auth:
token_auth = keyring.get_password("sncf", "key")
import pandas as pd
import requests
def page_gares(numero_page) :
return requests.get('https://api.sncf.com/v1/coverage/sncf/stop_areas?start_page={}'.format(numero_page),
auth=(token_auth, ''))
######################################
# on commence par la première page qui nous donne le nombre de résultats par page ainsi que le nombre total de résultats
page_initiale = page_gares(0)
item_per_page = page_initiale.json()['pagination']['items_per_page']
total_items = page_initiale.json()['pagination']['total_result']
dfs = []
# on fait une boucle sur toutes les pages suivantes
print_done = {}
for page in range(int(total_items/item_per_page)+1) :
stations_page = page_gares(page)
ensemble_stations = stations_page.json()
if 'stop_areas' not in ensemble_stations:
# pas d'arrêt
continue
# on ne retient que les informations qui nous intéressent
for station in ensemble_stations['stop_areas']:
station['lat'] = station['coord']['lat']
station["lon"] = station['coord']['lon']
if 'administrative_regions' in station.keys() :
for var_api, var_df in zip(['insee','name','label','id','zip_code'],
['insee','region','label_region','id_region','zip_code']) :
try:
station[var_df] = station['administrative_regions'][0][var_api]
except KeyError:
if var_api not in print_done:
print("key '{0}' not here but {1}".format(var_api,
",".join(station['administrative_regions'][0].keys())))
print_done[var_api] = var_api
[station.pop(k,None) for k in ['coord','links','administrative_regions', 'type', 'codes']]
stations = ensemble_stations['stop_areas']
try:
dp = pd.DataFrame(stations)
except Exception as e:
# La SNCF modifie parfois le schéma de ses données.
# On affiche station pour avoir une meilleure idée que l'erreur retournée par pandas
raise Exception("Problème de données\n{0}".format(stations)) from e
dfs.append(dp)
if page % 10 == 0:
print("je suis à la page", page, "---", dp.shape)
import pandas
df = pandas.concat(dfs)
df.to_csv("./ensemble_gares.csv")
print(df.shape)
df.head()
df = pd.read_csv("./ensemble_gares.csv", encoding = "ISO-8859-1")
print(df.columns)
print(df.shape)
# Exemple des informations sur une gare
df.iloc[317]
# on crée un dictionnaire des correspondances entre les noms et les codes des gares
dict_label_gare_code = df[['label','id']].set_index('label').to_dict()['id']
dict_nom_gare_code = df[['name','id']].set_index('name').to_dict()['id']
print(df.columns)
# graphique dans le plan des gares
%matplotlib inline
import matplotlib.pyplot as plt
lng_var = df[(df['lat']>35) & (df['lat']<60)]["lon"].tolist()
lat_var = df[(df['lat']>35) & (df['lat']<60)]["lat"].tolist()
plt.scatter(x = lng_var , y = lat_var,marker = "o")
##### une fonction qui sera utile pour calculer des temps
from datetime import datetime, timedelta
def convertir_en_temps(chaine) :
''' on convertit en date la chaine de caractères de l API'''
return datetime.strptime(chaine.replace('T',''),'%Y%m%d%H%M%S')
def convertir_en_chaine(dt) :
''' on convertit en chaîne de caractères un datetime'''
return datetime.strftime(dt, '%Y%m%dT%H%M%S')
# informations sur le trajet qu'on choisit dans le futur
# l'API ne retourne pas de résultatq très loin dans le passé
now = datetime.now()
dt = now + timedelta(14) # dans deux semaines
date_depart = convertir_en_chaine(dt)
gare_depart = 'stop_area:OCE:SA:87686006'
gare_arrivee = 'stop_area:OCE:SA:87722025'
# ensemble des départs
paris_lyon = requests.get('https://api.sncf.com/v1/coverage/sncf/journeys?'\
'from={}&to={}&datetime={}'.format(gare_depart, gare_arrivee, date_depart), \
auth=(token_auth, '')).json()
date_depart
# les gares du chemin entre Paris et Lyon sur ce trajet
# ainsi que le temps d'arrêt
session = paris_lyon['journeys'][0]['sections'][1]
if "stop_date_times" in session:
for i in session['stop_date_times'] :
print(i['stop_point']['name'],
convertir_en_temps(i['departure_date_time'])-convertir_en_temps(i['arrival_date_time']),"minutes d'arrêt")
### les trains qui partent autour de 19h00
departs_paris = requests.get('https://api.sncf.com/v1/coverage/sncf/stop_points/stop_point:OCE:SP:'\
'TGV-87686006/departures?from_datetime={}'.format(date_depart) ,
auth=(token_auth, '')).json()
# Nombre de trains que l'API renvoit à partir de cet horaire-là
print(len(departs_paris['departures']))
for i in range(len(departs_paris['departures'])) :
print(departs_paris['departures'][i]['stop_date_time']['departure_date_time'])
nombre_trains_pour_lyon = 0
for depart in departs_paris['departures'] :
if "Lyon" in depart['display_informations']['direction'] :
if convertir_en_temps(depart['stop_date_time']['arrival_date_time']) > convertir_en_temps(date_depart) and \
convertir_en_temps(depart['stop_date_time']['arrival_date_time']) < datetime(2016,11,18,0,0,0):
nombre_trains_pour_lyon += 1
print("le prochain départ pour Lyon sera le", convertir_en_temps(depart['stop_date_time']['arrival_date_time']))
print("Il y a" , nombre_trains_pour_lyon, "train(s) pour Lyon dans les trains proposés",
"par l'API qui partent encore le 17 novembre")
# on crée deux fonctions :
def trouver_destination_tgv(origine, datetime) :
'''Permet d avoir les 10 prochains départs d une gare donnée '''
return requests.get('https://api.sncf.com/v1/coverage/sncf/stop_points/{}/' \
'departures?from_datetime={}'.format(origine, datetime) ,
auth=(token_auth, '')).json()
def trouver_trajet_dispo_max_heure(gare_depart, date_heure_depart, date_heure_max) :
''' Permet d avoir toutes les informations sur des trajets partant d une gare entre une date X et une date Y '''
destinations = []
# on interroge l'API tant qu'il renvoie des informations sur les trains partant de Gare de lyon
while convertir_en_temps(date_heure_depart) < convertir_en_temps(date_heure_max) :
# on prend toutes les destinations qui partent à partir d'une certaine heure
destinations = destinations + trouver_destination_tgv(gare_depart, date_heure_depart)['departures']
nombre_resultats = trouver_destination_tgv(gare_depart, date_heure_depart)['pagination']['items_on_page']
# on trouve l'heure max de la première série de 10 solutions que l'application renvoie
# on remplace l'heure qu'on cherche par celle là
date_heure_depart = trouver_destination_tgv(gare_depart,
date_heure_depart)['departures'][nombre_resultats-1]['stop_date_time']['departure_date_time']
return destinations
# on trouve l'ensemble des trajets dont le départ est compris entre deux horaires
# informations sur le trajet qu'on choisit dans le futur
# l'API ne retourne pas de résultatq très loin dans le passé
now = datetime.now()
if now.hour < 6:
# pas trop tôt
now += timedelta(hours=4)
dt = now + timedelta(14) # dans deux semaines
date_heure = convertir_en_chaine(dt)
max_date_heure = convertir_en_chaine(dt + timedelta(hours=4))
print("entre", date_heure, "et", max_date_heure)
gare_initiale = 'stop_point:OCE:SP:TGV-87686006'
# on demande à avoir tous les trajets partant de gare de lyon entre 18h et 20h
destinations_depuis_paris_max_20h = trouver_trajet_dispo_max_heure(gare_initiale, date_heure, max_date_heure)
# on veut supprimer ceux pour lesquels le départ est après 20h00
dictionnaire_destinations = {}
i = 0
for depart in destinations_depuis_paris_max_20h :
print(depart['display_informations']['direction'],depart['stop_date_time']['departure_date_time'])
if convertir_en_temps(depart['stop_date_time']['departure_date_time']) < convertir_en_temps(max_date_heure) :
i += 1
dictionnaire_destinations[i] = depart
print("Je peux prendre", len(dictionnaire_destinations.keys()),
"trains qui partent entre 18h et 20h de Gare de Lyon le 17 novembre 2016")
# on cherche celui qui arrive le plus tôt à sa destination
def trouver_info_trajet(dep, arr, heure) :
res = requests.get('https://api.sncf.com/v1/coverage/sncf/journeys?from={}&to={}&datetime={}'.format(dep,arr,heure), \
auth=(token_auth, '')).json()
if 'journeys' not in res:
if 'error' in res and "no solution" in res["error"]['message']:
print("Pas de solution pour '{0} --> '{1}' h: {2}.".format(dep, arr, heure))
return None
return res['journeys'][0]
# on initiale l'heure à la fin de la journée : on veut réduire cette variable au maximum
# on veut 6h après le départ
heure_minimale = dt + timedelta(hours=8)
destination_la_plus_rapide = None
print("heure_minimale", heure_minimale, " len ", len(dictionnaire_destinations))
# parmi toutes les destinations possibles, on recherche le train qui arrive le plus tôt à sa destination finale
for code, valeurs in dictionnaire_destinations.items() :
''' on prend le code de la gare'''
code_destination = dictionnaire_destinations[code]['route']['direction']['id']
''' on regarde à quelle heure arrive le train'''
trajet = trouver_info_trajet('stop_area:OCE:SA:87686006',code_destination,
dictionnaire_destinations[code]['stop_date_time']['arrival_date_time'])
if trajet is None:
continue
if heure_minimale > convertir_en_temps(trajet['arrival_date_time']) :
heure_minimale = convertir_en_temps(trajet['arrival_date_time'])
destination_la_plus_rapide = dictionnaire_destinations[code]
if destination_la_plus_rapide is not None:
print(destination_la_plus_rapide['display_informations']['direction'], heure_minimale)
else:
print("pas de résultat")
# on va trouver toutes les gares qui sont sur les trajets des trains retenus donc atteignables sans correspondance
def trouver_toutes_les_gares_du_trajet(gare_depart, gare_arrivee_finale, horaire_depart) :
return requests.get('https://api.sncf.com/v1/coverage/sncf/journeys?from={}&to={}' \
'&datetime={}'.format(gare_depart,gare_arrivee_finale,horaire_depart), \
auth=(token_auth, '')).json()
# Exemple pour la première gare de la liste
gare_depart = dictionnaire_destinations[1]['stop_point']['id']
gare_arrivee = dictionnaire_destinations[1]['route']['direction']['id']
horaire_train = dictionnaire_destinations[1]['stop_date_time']['arrival_date_time']
######################
trajet_recherche = trouver_toutes_les_gares_du_trajet(gare_depart,gare_arrivee,horaire_train)
session = trajet_recherche['journeys'][0]['sections'][0]
if "stop_date_times" in session:
for i in session['stop_date_times']:
print(i['stop_point']['name'])
#### on fait la liste des gares où on peut aller sans correspondance
liste_gares_direct = []
for x in dictionnaire_destinations.keys():
# on prend les deux gares départ + finale
gare_depart = dictionnaire_destinations[x]['stop_point']['id']
gare_arrivee = dictionnaire_destinations[x]['route']['direction']['id']
horaire_train = dictionnaire_destinations[x]['stop_date_time']['arrival_date_time']
# on appelle la fonction définie précédemment
trajet_recherche = trouver_toutes_les_gares_du_trajet(gare_depart,gare_arrivee,horaire_train)
if 'error' in trajet_recherche:
continue
session = trajet_recherche['journeys'][0]['sections'][0]
if "stop_date_times" in session:
for i in session['stop_date_times']:
print(i['stop_point']['name'], i['arrival_date_time'])
liste_gares_direct.append(i['stop_point']['name'])
print("-------------")
#### là on a la liste des gares atteignables sans correspondance
liste_gares_direct = set(liste_gares_direct)
# pour le premier trajet gare de la liste trouvée à l'étape précédente
# on va chercher toutes les connexions des gares possibles, entre le moment de l'arrivée
# et 8 heures le lendemain matin
gare_depart = dictionnaire_destinations[1]['stop_point']['id']
gare_arrivee = dictionnaire_destinations[1]['route']['direction']['id']
horaire_train = dictionnaire_destinations[1]['stop_date_time']['arrival_date_time']
horaire_max = convertir_en_chaine(dt + timedelta(hours=8))
print("horaire_max", horaire_max)
###################### en partant de gare de lyon en direction de Perpignan
trajet_recherche = trouver_toutes_les_gares_du_trajet(gare_depart,gare_arrivee,horaire_train)
dictionnaire_correspondances = {}
for i in trajet_recherche['journeys'][0]['sections'][0]['stop_date_times']:
#print("la gare où on est descendu depuis Paris", i['stop_point']['name'])
if i['stop_point']['id'] == "stop_point:OCE:SP:TGV-87686006" :
#print("on ne prend pas la gare de Lyon - ce n'est pas une gare du trajet")
pass
else :
# on va appliquer à nouveau la fonction des trajets disponibles mais pour l'ensemble des gares
gare_dep_connexion = i['stop_point']['id']
nom_gare_dep = i['stop_point']['name']
heure_dep_connexion = i['arrival_date_time']
trajet_recherche_connexion = trouver_trajet_dispo_max_heure(gare_dep_connexion, heure_dep_connexion, horaire_max)
test_as_connexion_on_time = True
# pour chaque trajet possible depuis la gare où on est arrivé depuis paris, on va vérifier qu'on part bien
# avant 8h le lendemain
autre_gare = None
for vers_autre_gare in trajet_recherche_connexion :
heure_depart_depuis_autre_gare = vers_autre_gare['stop_date_time']['departure_date_time']
destination_trajet = vers_autre_gare['display_informations']['direction']
if convertir_en_temps(heure_depart_depuis_autre_gare) < convertir_en_temps(horaire_max) :
dictionnaire_correspondances[(nom_gare_dep,heure_depart_depuis_autre_gare)] = destination_trajet
test_as_connexion_on_time = False
# print(nom_gare_dep,heure_depart_depuis_autre_gare, "gare finale du trajet", destination_trajet)
autre_gare = vers_autre_gare
if autre_gare and test_as_connexion_on_time:
dictionnaire_correspondances[(nom_gare_dep,autre_gare['stop_date_time']['departure_date_time'])] = ""
# on garde toutes les gares où on peut aller depuis une des gares de correspondance, avec un départ avant 8H
dictionnaire_correspondances
# Pour les trajets qui partent avant 8h des gares, on va chercher toutes les gares qui sont sur le trajet
gares_avec_connexion = []
for k,v in dictionnaire_correspondances.items() :
if len(v) == 0 :
pass
else :
if k[0] not in dict_nom_gare_code:
print("'{0}' pas trouvé dans {1}".format(k[0], ", ".join(
sorted(_ for _ in dict_nom_gare_code if _[:4] == k[0][:4]))))
continue
if v not in dict_label_gare_code:
print("'{0}' pas trouvé dans {1}".format(v, ", ".join(
sorted(_ for _ in dict_label_gare_code if _[:4] == v[:4]))))
continue
dep = dict_nom_gare_code[k[0]]
arr = dict_label_gare_code[v]
gares_entre_dep_arr = trouver_toutes_les_gares_du_trajet(dep, arr,k[1])
for gare in gares_entre_dep_arr['journeys'][0]['sections'][1]['stop_date_times']:
#print("gare depart:", k[0], gare['stop_point']['name'])
gares_avec_connexion.append(gare['stop_point']['name'])
# la liste des gares atteignables avec 1 correspondance
gares_avec_connexion = set(gares_avec_connexion)
print(gares_avec_connexion)
# on crée la liste des gares atteignables seulement avec une correspondance (pas directement atteignable)
gares_atteintes_avec_connexion = [a for a in gares_avec_connexion if (a not in liste_gares_direct)]
print(gares_atteintes_avec_connexion)
gares_avec_connexion = []
for gare_initiale in dictionnaire_destinations:
# pour le premier trajet gare de la liste trouvée à l'étape précédente
# on va chercher toutes les connexions des gares possibles
print(gare_initiale, "/", len(dictionnaire_destinations))
gare_depart = dictionnaire_destinations[gare_initiale]['stop_point']['id']
gare_arrivee = dictionnaire_destinations[gare_initiale]['route']['direction']['id']
horaire_train = dictionnaire_destinations[gare_initiale]['stop_date_time']['arrival_date_time']
# Pour les trajets qui partent avant 8h des gares, on va chercher toutes les gares qui sont sur le trajet
trajet_recherche = trouver_toutes_les_gares_du_trajet(gare_depart, gare_arrivee, horaire_train)
dictionnaire_correspondances = {}
if 'journeys' not in trajet_recherche:
print("Pas de trajet entre '{0}' et '{1}' h={2}.".format(gare_depart, gare_arrivee, horaire_train))
continue
session = trajet_recherche['journeys'][0]['sections'][0]
if "stop_date_times" in session:
for i in session['stop_date_times']:
if i['stop_point']['id'] == "stop_point:OCE:SP:TGV-87686006" :
#print("on ne prend pas la gare de Lyon - ce n'est pas une gare du trajet")
pass
else :
# on va appliquer à nouveau la fonction des trajets disponibles mais pour l'ensemble des gares
gare_dep_connexion = i['stop_point']['id']
nom_gare_dep = i['stop_point']['name']
heure_dep_connexion = i['arrival_date_time']
trajet_recherche_connexion = trouver_trajet_dispo_max_heure(gare_dep_connexion, heure_dep_connexion, horaire_max)
test_as_connexion_on_time = True
# pour chaque trajet possible depuis la gare où on est arrivé depuis paris, on va vérifier qu'on part bien
# avant 8h le lendemain
for vers_autre_gare in trajet_recherche_connexion :
heure_depart_depuis_autre_gare = vers_autre_gare['stop_date_time']['departure_date_time']
destination_trajet = vers_autre_gare['display_informations']['direction']
if convertir_en_temps(heure_depart_depuis_autre_gare) < convertir_en_temps(horaire_max) :
dictionnaire_correspondances[(nom_gare_dep,heure_depart_depuis_autre_gare)] = destination_trajet
test_as_connexion_on_time = False
if test_as_connexion_on_time == True :
dictionnaire_correspondances[(nom_gare_dep,vers_autre_gare['stop_date_time']['departure_date_time'])] = ""
# on garde toutes les gares où on peut aller depuis une des gares de correspondance, avec un départ avant 8H
for k,v in dictionnaire_correspondances.items() :
if len(v) == 0:
continue
if k[0] not in dict_nom_gare_code:
print("'{0}' pas trouvé dans {1}".format(k[0], ", ".join(
sorted(_ for _ in dict_nom_gare_code if _[:4] == k[0][:4]))))
continue
if v not in dict_label_gare_code:
print("'{0}' pas trouvé dans {1}".format(v, ", ".join(
sorted(_ for _ in dict_label_gare_code if _[:4] == v[:4]))))
continue
dep = dict_nom_gare_code[k[0]]
arr = dict_label_gare_code[v]
gares_entre_dep_arr = trouver_toutes_les_gares_du_trajet(dep, arr, k[1])
if 'journeys' not in gares_entre_dep_arr:
print("Pas de trajet entre '{0}' et '{1}'.".format(k[0], v))
continue
session = gares_entre_dep_arr['journeys'][0]['sections'][1]
if "stop_date_times" in session:
for gare in session['stop_date_times'] :
gares_avec_connexion.append(gare['stop_point']['name'])
# la liste des gares atteignables avec 1 correspondance
gares_avec_connexion = set(gares_avec_connexion)
gares_connexion = [a for a in gares_avec_connexion if a not in liste_gares_direct]
print(gares_connexion)
######### Type de chaque gare pour le graphique
dict_type_gares = {}
for a in liste_gares_direct :
dict_type_gares[a] = "direct"
for a in gares_connexion :
dict_type_gares[a] = "correspondance"
dict_type_gares['Paris-Gare-de-Lyon'] = 'depart'
dict_type_gares
# on va les représenter grâce à la base des latitude / longitude
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.lines import Line2D
mpl.rcParams['axes.facecolor'] = "whitesmoke"
palette = plt.cm.spring
liste_couleurs = [palette(0), palette(0.5), palette(0.8)]
data_all = pd.read_csv("./ensemble_gares.csv", encoding = "ISO-8859-1")
connexions = []
lat = []
lon = []
labels = []
dict_lat = data_all.set_index('name')['lat'].to_dict()
dict_lon = data_all.set_index('name')['lon'].to_dict()
#dict_lab = data_all.set_index('name')['name'].str.replace("gare de","").to_dict()
for gare in dict_type_gares:
if gare not in dict_lat:
print("'{0}' pas trouvé dans dict_lat (problème d'accents?)".format(gare))
continue
if gare not in dict_lon:
print("'{0}' pas trouvé dans dict_lon (problème d'accents?)".format(gare))
continue
lat.append(dict_lat[gare])
lon.append(dict_lon[gare])
labels.append(gare)
%matplotlib inline
### La carte
###################################################################################################
def liste_unique(liste) :
unicite = []
for x in liste :
if x in unicite :
pass
else :
unicite.append(x)
return unicite
lab_un = liste_unique(labels)
lat_un = liste_unique(lat)
lon_un = liste_unique(lon)
fig = plt.figure(figsize=(12,10))
for label, x, y in set(zip(labels, lon, lat)) :
if dict_type_gares[label] == "direct" :
plt.annotate(label, xy = (x - 0.05, y - 0.05), horizontalalignment = 'right', size = 13)
else :
plt.annotate(label, xy = (x + 0.05, y + 0.05), horizontalalignment = 'left', size = 13)
colors = []
for x in lab_un :
if dict_type_gares[x] == "depart" :
colors.append(liste_couleurs[0])
if dict_type_gares[x] == "direct" :
colors.append(liste_couleurs[1])
if dict_type_gares[x] == "correspondance" :
colors.append(liste_couleurs[2])
plt.scatter(x = lon_un , y = lat_un, marker = "o", c = colors, s = 100, alpha = 0.5)
#### Legende
circ1 = Line2D([0], [0], linestyle="none", marker="o", alpha=0.5, markersize=10, markerfacecolor = liste_couleurs[0])
circ2 = Line2D([0], [0], linestyle="none", marker="o", alpha=0.5, markersize=10, markerfacecolor = liste_couleurs[1])
circ3 = Line2D([0], [0], linestyle="none", marker="o", alpha=0.5, markersize=10, markerfacecolor = liste_couleurs[2])
legende = plt.legend((circ1, circ2, circ3), ("Gare de départ", "Direct depuis Gare de Lyon le soir du 17 novembre",
"Avec une correspondance depuis une gare directe"), numpoints=1, loc="best")
legende.get_frame().set_facecolor('white')
plt.title("Gares atteignables avant minuit depuis la Gare de Lyon", size = 20)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using pandas.read_csv, load the tab-deliminted text file of gene expression measurements (rows correspond to genes, columns correspond to bladder tumor samples), into a data frame gene_matrix_for_network_df.
Step2: Use the pandas.DataFrame.as_matrix method to make a matrix gene_matrix_for_network. Print out the dimensions of the matrix, by accessing its shape variable
Step3: Use del to delete the data frame, since we no longer need it (save memory)
Step4: Compute the 4,473 x 4,473 matrix of gene-gene Pearson correlation coefficients, using numpy.corrcoef (this function treats each row as a variable, so you don't have to do any transposing of the matrix, unlike the situation in R).
Step5: Look at the online help for numpy.fill_diagonal. Does it return the modified matrix or modify the matrix argument in place?
Step6: Set the diagonal elements of the matrix to zero, using numpy.fill_diagonal
Step7: Look at the online help for numpy.multiply. Does it do element-wise multiplication or matrix multiplication?
Step8: Look at the online help for numpy.tri. Does it modify a matrix argument in-place or return a matrix? What is in the matrix that it returns?
Step9: Set the upper-triangle of the matrix to zero, using numpy.multiply and numpy.tri
Step10: Using numpy.where, get a tuple of two numpy.arrays containing the row/col indices of the entries of the matrix for which R >= 0.75. Use array indexing to obtain the R values for these matrix entries, as a numpy array cor_coeff_values_above_thresh.
Step11: Refer to Eq. (13.5) in the assigned readding for today's class (p9 of the PDF). Obtain a numpy array of the correlation coefficients that exceeded 0.75, and Fisher-transform the correlation coefficient values to get a vector z_scores of z scores. Each of these z scores will correspond to an edge in the network, unless the absolute z score is too small such that we can't exclude the null hypothesis that the corresponding two genes' expression values are indepdenent (we will perform that check in the next step).
Step12: Delete the correlation matrix object in order to save memory (we won't need it from here on out).
Step13: Adjust the P values for multiple hypothesis testing, using the statsmodels.sandbox.stats.multicomp.multipletests function wth method="fdr_bh"
Step14: Verify that we don't need to drop any entries due to the adjusted P value not being small enough (use numpy.where and len); this should produce zero since we have M=414 samples per gene.
Step15: Read the online help for the function zip. What does it do?
Step16: We want to pass our tuple of numpy arrays containing row and column indices to Graph.TupleList; however, Graph.TupleList accepts a tuple list, not a tuple of numpy arrays. So we need to make a tuple list, using zip
Step17: Make an undirected graph from the row/column indices of the (upper-triangle) gene pairs whose correlations were above our threshold, using igraph.Graph.TupleList. Print a summary of the network, as a sanity check, using the igraph.Graph.summary method.
Step18: Plot the degree distribution on log-log scale; does it appear to be scale-free?
|
<ASSISTANT_TASK:>
Python Code:
# Gene co-expression network construction from a TCGA expression matrix.
# This completes the exercise placeholders: load the matrix, compute
# gene-gene Pearson correlations, threshold at R >= 0.75, Fisher-transform
# the surviving correlations, FDR-correct the P values, and build an
# undirected igraph network from the passing gene pairs.
import pandas
import scipy.stats
import matplotlib
import pylab
import numpy
import statsmodels.sandbox.stats.multicomp
import igraph
import math

# Load the genes-x-samples expression matrix (tab-separated).
# NOTE(review): path assumed from the course layout -- adjust if your copy
# of the data lives elsewhere.
gene_matrix_for_network_df = pandas.read_csv("shared/bladder_cancer_genes_tcga.txt", sep="\t")
gene_matrix_for_network = gene_matrix_for_network_df.values
gene_matrix_for_network.shape

help(numpy.corrcoef)

# Gene-gene Pearson correlation matrix (rows of the input are genes).
gene_matrix_for_network_cor = numpy.corrcoef(gene_matrix_for_network)

help(numpy.fill_diagonal)

# Zero the self-correlations (in place) so they can never pass the threshold.
numpy.fill_diagonal(gene_matrix_for_network_cor, 0)

help(numpy.multiply)
help(numpy.tri)

# numpy.tri returns a NEW matrix of ones on and below the diagonal (it does
# not modify anything in place), so this element-wise product zeroes the
# upper triangle -- each gene pair is then counted exactly once.
gene_matrix_for_network_cor = numpy.multiply(gene_matrix_for_network_cor, numpy.tri(*gene_matrix_for_network_cor.shape))

# Row/column indices of gene pairs whose correlation is >= 0.75, and the
# corresponding correlation-coefficient values.
inds_correl_above_thresh = numpy.where(gene_matrix_for_network_cor >= 0.75)
cor_coeff_values_above_thresh = gene_matrix_for_network_cor[inds_correl_above_thresh]

# Fisher transformation (Eq. 13.5): z = 0.5 * ln((1 + R) / (1 - R)).
z_scores = 0.5 * numpy.log((1 + cor_coeff_values_above_thresh) /
                           (1 - cor_coeff_values_above_thresh))

# Free the (large) correlation matrix; only the thresholded values are
# needed from here on out.
del gene_matrix_for_network_cor

# Under the null hypothesis of independence, z * sqrt(M - 3) is
# approximately standard normal; take a two-sided P value from the tail.
M = gene_matrix_for_network.shape[1]
P_values = 2 * scipy.stats.norm.cdf(-numpy.abs(z_scores) * math.sqrt(M - 3))

# Benjamini-Hochberg FDR adjustment; index [1] holds the adjusted P values.
P_values_adj = statsmodels.sandbox.stats.multicomp.multipletests(P_values, method="fdr_bh")[1]

# Sanity check: with M = 414 samples per gene, no adjusted P value should
# reach 0.01, so this count should be zero.
len(numpy.where(P_values_adj >= 0.01)[0])

help(zip)

# Pair up the row and column index arrays into (row, col) tuples for igraph.
row_col_inds_tuple_list = zip(inds_correl_above_thresh[0], inds_correl_above_thresh[1])
## [note this can be done more elegantly using the unary "*" operator:
##     row_col_inds_tuple_list = zip(*inds_correl_above_thresh)
##  see how we only need to type the variable name once, if we use the unary "*" ]

# Undirected co-expression network; each tuple becomes one edge.
final_network = igraph.Graph.TupleList(row_col_inds_tuple_list, directed=False)
final_network.summary()

# Degree distribution on log-log axes; an approximately straight line would
# suggest a scale-free network.
degree_dist = final_network.degree_distribution()
xs, ys = zip(*[(left, count) for left, _, count in degree_dist.bins()])
pylab.loglog(xs, ys, marker="o", linestyle="None")
pylab.xlabel("degree (k)")
pylab.ylabel("number of vertices")
pylab.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The behavior of the participants was very systematic. Except for the occasional error, whenever two words belonged to the same "animal" or "furniture" category, they would judge them as related, and unrelated otherwise. You can clearly see this when plotting the average button response (0=related, 1=unrelated) to each word-pair.
Step2: The dendrogram in figure 3 of the manuscript suggests that there is a subcluster inside the furniture items that has a significant difference in the amplitude of the N400 component. We now test if this difference is also detecable in the behavioral responses.
Step3: It seems there is very little difference in the behavioral responses between the within-cluster and between-cluster trials. A difference that doesn't cross the significance threshold.
|
<ASSISTANT_TASK:>
Python Code:
# Import Pandas data handing module
# Behavioral analysis accompanying the N400 EEG study: load the trial data,
# derive per-trial "distance" scores from the N400 amplitudes, summarize the
# relatedness button presses as a cue-x-association matrix, and fit linear
# mixed-effects models (via the rpy2 bridge to R) on the cluster labels.
# NOTE: this is notebook code; "%matplotlib inline", "%load_ext" and the
# "%%R" lines are IPython magics, and the "%%R" cells contain R, not Python.
import pandas as pd
# For pretty display of tables
from IPython.display import display
# Load the data
data = pd.read_csv('data.csv', index_col=['subject', 'cue-english', 'association-english'])
data = data.sort_index()
# Transform the "raw" N400 amplitudes into distance measurements according to the equation above
# (each trial's N400 minus its subject/association group mean, i.e. a deviation score).
data['distance'] = data['N400'] - data.groupby(['subject', 'association-english'])['N400'].transform('mean')
# Show the first 10 rows
display(data.head(10))
# Compute the average button responses for each cue-association pair
# (button: 0 = "related", 1 = "unrelated")
mean_button_response = data.groupby(['cue-english', 'association-english'])['button'].agg('mean').reset_index()
# Reshape the data into a cue-association word matrix
matrix = mean_button_response.pivot(index='cue-english', columns='association-english', values='button')
# Re-order the rows and columns so that animals are sorted before furniture
animals = ['elephant', 'giraffe', 'hippopotamus', 'lion', 'rhinoceros', 'tiger', 'zebra']
furniture = ['bed', 'chair', 'closet', 'couch', 'desk', 'door', 'table']
order = animals + furniture
matrix = matrix.loc[order, order]
# Display the matrix
display(matrix)
# Plot the matrix as a heatmap
from matplotlib import pyplot as plt
%matplotlib inline
plt.figure(figsize=(6, 4))
plt.matshow(matrix.values, fignum=1)
plt.xticks(range(14), matrix.columns, rotation=90)
plt.yticks(range(14), matrix.index)
plt.colorbar()
# Limit the data to just the furniture items
# (rows whose cue AND association are both furniture words)
furniture_only = data.loc(axis=0)[:, furniture, furniture]
# Assign within-cluster and between-cluster labels, based on the subclusters in figure 3 of the manuscript
cluster1 = ['desk', 'bed', 'closet']
cluster2 = ['door', 'table', 'chair', 'couch']
furniture_only = furniture_only.assign(label = [
    'within' if (a in cluster1 and b in cluster1) or (a in cluster2 and b in cluster2) else 'between'
    for a, b in zip(furniture_only.index.get_level_values('cue-english'),
                    furniture_only.index.get_level_values('association-english'))
])
# Ratio of trials judged "related" (button == 0), split by cluster label.
within_related = len(furniture_only.query('label=="within" and button == 0'))
within_total = len(furniture_only.query('label=="within"'))
print('Ratio of within-cluster trials with a "related" response: %d/%d=%.3f' %
      (within_related, within_total, within_related / float(within_total)))
between_related = len(furniture_only.query('label=="between" and button == 0'))
between_total = len(furniture_only.query('label=="between"'))
print('Ratio of between-cluster trials with a "related" response: %d/%d=%.3f' %
      (between_related, between_total, between_related / float(between_total)))
# Display the first 10 rows
display(furniture_only.head(10))
# Bring in a bridge to R for statistics
import rpy2
%load_ext rpy2.ipython.rmagic
# The R code at the bottom produces some harmless warnings that clutter up the page.
# This disables printing of the warnings. When modifying this notebook, you may want to turn
# this back on.
import warnings
warnings.filterwarnings('ignore')
# To transfer this data frame to R, the index must be reset
furniture_only = furniture_only.reset_index()
# R cell: LME of the button response on the cluster label, with a per-subject
# random slope for the label.
%%R -i furniture_only
library('lme4')
library('lmerTest')
# Fit a LME model to test whether there is a difference between the within-cluster and between-cluster
# button responses.
m <- lmer(button ~ label + (label | subject), data=furniture_only)
print(summary(m))
# Limit the data to just the trials for which the participant pressed the "words are related" button
related_response_only = furniture_only.query('button == 0')
print('The total number of selected trials is: %d' % len(related_response_only))
# To transfer this data frame to R, the index must be reset
related_response_only = related_response_only.reset_index()
# R cell: LME of the N400-derived distance on the cluster label, using only
# the "related"-response trials selected above.
%%R -i related_response_only
library('lme4')
library('lmerTest')
# Fit a LME model to test whether there is a difference between the within-cluster and between-cluster
# distance values.
m <- lmer(distance ~ label + (label | subject), data=related_response_only)
print(summary(m))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: B
Step2: C
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import sklearn.datasets as datasets
import seaborn as sns
iris = datasets.load_iris()
### BEGIN SOLUTION
### END SOLUTION
import sklearn.neighbors as neighbors
### BEGIN SOLUTION
### END SOLUTION
try:
train_knn
except:
assert False
else:
assert True
np.random.seed(23843)
X = np.random.random((10, 2))
y = np.random.randint(2, size = 10)
m1 = train_knn(X, y, 3)
assert m1.n_neighbors == 3
x = m1.kneighbors()[1]
assert set(x[0].tolist()) == set([3, 6, 8])
m2 = train_knn(X, y, 11)
assert m2.n_neighbors == 11
try:
m2.kneighbors()
except ValueError:
assert True
else:
assert False
try:
test_knn
except:
assert False
else:
assert True
np.random.seed(65456)
X = np.random.random((10, 2))
y = np.random.randint(2, size = 10)
m1 = train_knn(X[:8], y[:8], 3)
acc1 = test_knn(X[8:], y[8:], m1)
np.testing.assert_allclose(acc1, 0.0)
m2 = train_knn(X[2:], y[2:], 1)
acc2 = test_knn(X[:2], y[:2], m2)
np.testing.assert_allclose(acc2, 0.5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fit the model
Step2: We recall that the "aftershock kernel" has the form
Step3: In the following plot, for the 5 grid cells with the highest crime count, we plot the occurance of events
Step4: Can we tell the difference from a Poisson process?
Step5: For each cell, do the same, combine, and plot.
|
<ASSISTANT_TASK:>
Python Code:
# Fit SEPP (self-exciting point process, open_cp.seppexp variant) models to
# the South Side crime data at two grid resolutions and plot the estimated
# background rates. NOTE: notebook code -- "%matplotlib inline" is an IPython
# magic, and load_data / grid_for_south_side / yellow_to_red / descartes / plt
# come from the star-import of the local `common` module.
%matplotlib inline
from common import *
datadir = os.path.join("//media", "disk", "Data")
#datadir = os.path.join("..", "..", "..", "..", "..", "Data")
import open_cp.logger
open_cp.logger.log_to_true_stdout()
south_side, points = load_data(datadir)
points.time_range
# Default grid plus a finer 100m x 100m grid over the same region
# (default cell size is presumably 250m, per the plot titles below).
masked_grid = grid_for_south_side()
masked_grid2 = grid_for_south_side(xsize=100, ysize=100)
import open_cp.seppexp as sepp
# Train one model per grid; only the first uses the "corrected" variant.
trainer = sepp.SEPPTrainer(masked_grid.region(), grid_size=masked_grid.xsize)
trainer.data = points
predictor = trainer.train(iterations=100, use_corrected=True)
trainer2 = sepp.SEPPTrainer(masked_grid2.region(), grid_size=masked_grid2.xsize)
trainer2.data = points
predictor2 = trainer2.train(iterations=100, use_corrected=False)
# Background (non-triggered) intensity estimates, masked to the study region.
background = predictor.background_prediction()
background.mask_with(masked_grid)
background2 = predictor2.background_prediction()
background2.mask_with(masked_grid2)
# Side-by-side heatmaps of the background rate (scaled by 10^4), with the
# South Side outline drawn on top of each.
fig, ax = plt.subplots(ncols=2, figsize=(16,8))
for a in ax:
    a.set_aspect(1)
    a.add_patch(descartes.PolygonPatch(south_side, fc="none", ec="Black"))
mappable = ax[0].pcolormesh(*background.mesh_data(), background.intensity_matrix * 10000, cmap=yellow_to_red)
ax[0].set_title("Grid size of 250m")
cbar = fig.colorbar(mappable, ax=ax[0])
cbar.set_label("Rate $10^{-4}$")
mappable = ax[1].pcolormesh(*background2.mesh_data(), background2.intensity_matrix * 10000, cmap=yellow_to_red)
ax[1].set_title("Grid size of 100m")
cbar = fig.colorbar(mappable, ax=ax[1])
cbar.set_label("Rate $10^{-4}$")
# Report the fitted trigger-kernel parameters omega and theta from each
# model (see the aftershock kernel described in the text); omega^-1 is
# printed alongside as the kernel's characteristic time scale.
print("Predicted omega={}, omega^-1={}, theta={}x10^-4".format(predictor.omega,
    1/predictor.omega, predictor.theta*10000))
print("Predicted omega={}, omega^-1={}, theta={}x10^-4".format(predictor2.omega,
    1/predictor2.omega, predictor2.theta*10000))
def points_in(region):
    """Return the events from the module-global `points` collection whose
    coordinates fall inside `region` (x/y min inclusive, max exclusive)."""
    in_x = (points.xcoords >= region.xmin) & (points.xcoords < region.xmax)
    in_y = (points.ycoords >= region.ymin) & (points.ycoords < region.ymax)
    return points[in_x & in_y]
# Bucket the events by grid cell: by_grid maps (x, y) cell coordinates to
# the points object of events falling inside that cell (valid cells only).
by_grid = {}
for x in range(masked_grid.xextent):
    for y in range(masked_grid.yextent):
        if masked_grid.is_valid(x, y):
            by_grid[(x,y)] = points_in(masked_grid.bounding_box_of_cell(x, y))
# (cell, event count) pairs sorted ascending by count, so the busiest cells
# end up at the tail of the list; peek at the top five.
size_lookup = { key : tp.number_data_points for key, tp in by_grid.items() }
size_lookup = list(size_lookup.items())
size_lookup.sort(key = lambda p : p[1])
size_lookup[-5:]
# For each cell, compute the distance of every event from the cell's centre.
distances = {}
for key, tp in by_grid.items():
    cell = masked_grid.bounding_box_of_cell(*key)
    midx, midy = (cell.xmin + cell.xmax)/2, (cell.ymin + cell.ymax)/2
    distances[key] = np.sqrt((tp.xcoords - midx)**2 + (tp.ycoords - midy)**2)
# Overall observation window, converted to minutes.
start = points.time_range[0]
end = points.time_range[1]
length = (end - start) / np.timedelta64(1,"m")
# For the five busiest cells, plot each event's time (minutes from the start
# of the window) against its distance from the cell centre, one row per cell.
fig, axes = plt.subplots(nrows=5, figsize=(18,8))
for i, ax in enumerate(axes):
    key = size_lookup[-1-i][0]
    ts = (by_grid[key].timestamps - start) / np.timedelta64(1,"m")
    ax.scatter(ts, distances[key])
    ax.set(xlabel="minutes", ylabel="distance")
    ax.set(title="For grid cell {}".format(key))
    ax.set(xlim=[0,length])
fig.tight_layout()
# Busiest cell: event times in minutes from the window start, plus the
# cell's empirical event rate (events per minute).
key = size_lookup[-1][0]
ts = by_grid[key].timestamps
ts = (np.asarray(ts) - start) / np.timedelta64(1,"m")
rate = len(ts) / length
def comp(t):
    # Rescale time by the empirical rate: under a homogeneous Poisson model
    # with this rate, the rescaled process has unit rate.
    return rate * t
def qq_data(a, b, ax):
    """Empirical quantile-quantile scatter: plot the sorted values of `a`
    against the sorted values of `b` on the matplotlib axes `ax`."""
    xs = np.sort(np.array(a))
    ys = np.sort(np.array(b))
    ax.scatter(xs, ys)
def qq_exponential(a, ax, **kwargs):
    """Q-Q plot of the sample `a` against the unit-rate exponential.

    The sorted sample is plotted against the exponential quantiles
    -log(1 - i/n) for i = 0..n-1; extra keyword arguments are forwarded
    to ax.scatter.
    """
    data = np.array(a)
    data.sort()
    n = len(data)
    theory = [-np.log(1 - rank / n) for rank in range(n)]
    ax.scatter(data, theory, **kwargs)
    ax.set(xlabel="data", ylabel="theory")
def correlate(a, ax, **kwargs):
    """Lag-1 autocorrelation scatter for exponential-like samples.

    Maps the array `a` onto [0, 1) via the exponential CDF u = 1 - exp(-a),
    then plots each u[i] against its successor u[i+1]; keyword arguments are
    forwarded to ax.scatter.
    """
    uniform = 1.0 - np.exp(-a)
    ax.scatter(uniform[:-1], uniform[1:], **kwargs)
# Rescaled inter-event times for the busiest cell; under the Poisson null
# these should behave like i.i.d. unit-rate exponentials.
diffs = comp(ts)
diffs = diffs[1:] - diffs[:-1]
def do_test_plots(diffs, alpha=1):
    """Diagnostic 2x2 figure for `diffs` (rescaled inter-event times).

    Top row: Q-Q plot of `diffs` against the unit exponential (with a y=x
    reference line) and the lag-1 autocorrelation scatter. Bottom row: the
    same two plots for a freshly sampled exponential of the same size, as a
    visual baseline of what "really exponential" data looks like. `alpha`
    is the scatter-point opacity.
    """
    expected = np.random.exponential(size=len(diffs))
    fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(12, 12))
    ax = axes[0][0]
    qq_exponential(diffs, ax, alpha=alpha)
    ax.plot([0,5], [0,5], color="red")
    ax.set(title="Q-Q plot against exponential")
    ax = axes[0][1]
    correlate(diffs, ax, alpha=alpha)
    ax.set(title="Autocorrelation")
    ax = axes[1][0]
    qq_exponential(expected, ax, color="green", alpha=alpha)
    ax.plot([0,5], [0,5], color="red")
    ax.set(title="Sample of exponential data")
    ax = axes[1][1]
    correlate(expected, ax, color="green", alpha=alpha)
do_test_plots(diffs)
# Pool the rescaled inter-event times across ALL cells with at least 10
# events: per cell, convert timestamps to minutes, rescale by that cell's
# empirical rate so its process is unit-rate, take successive differences,
# and collect them all into one array.
diffs = []
for key, tp in by_grid.items():
    ts = tp.timestamps
    if len(ts) < 10:
        continue
    ts = (np.asarray(ts) - start) / np.timedelta64(1,"m")
    rate = len(ts) / length
    ts *= rate
    d = ts[1:] - ts[:-1]
    diffs.extend(d)
diffs = np.asarray(diffs)
# Faint points (low alpha) since there are many; then show the pooled array.
do_test_plots(diffs, alpha=0.2)
diffs
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nerc', 'hadgem3-gc31-hm', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's go over the columns
Step2: We've done much of the data processing for you. Fields like timestamp and sid are standardized across all our Store Datasets, so the datasets are easy to combine. We have standardized the sid across all our equity databases.
Step3: Finally, suppose we want a DataFrame of that data, but we only want the sid, timestamp, and div_type
Step4: <a id='pipeline'></a>
Step5: Now that we've imported the data, let's take a look at which fields are available for each dataset.
Step6: Now that we know what fields we have access to, let's see what this data looks like when we run it through Pipeline.
Step7: Taking what we've seen from above, let's see how we'd move that into the backtester.
|
<ASSISTANT_TASK:>
Python Code:
# import the dataset
# from quantopian.interactive.data.eventvestor import dividends as dataset
# or if you want to import the free dataset, use:
from quantopian.interactive.data.eventvestor import dividends_free as dataset
# import data operations
from odo import odo
# import other libraries we will use
import pandas as pd
# Let's use blaze to understand the data a bit using Blaze dshape()
dataset.dshape
# And how many rows are there?
# N.B. we're using a Blaze function to do this, not len()
dataset.count()
# Let's see what the data looks like. We'll grab the first three rows.
dataset[:3]
# Filter down to $0.50 dividends paid in US dollars.
fiftyc = dataset[(dataset.div_amount==0.5) & (dataset['div_currency']=='$')]
# When displaying a Blaze Data Object, the printout is automatically truncated to ten rows.
fiftyc.sort('timestamp')
# Materialise the lazy Blaze expression into an in-memory pandas DataFrame.
fifty_df = odo(fiftyc, pd.DataFrame)
reduced = fifty_df[['sid','div_type','timestamp']]
# When printed: pandas DataFrames display the head(30) and tail(30) rows, and truncate the middle.
reduced
# NOTE(review): the four lines below repeat the cell above verbatim --
# looks like a stray duplicated notebook cell.
fifty_df = odo(fiftyc, pd.DataFrame)
reduced = fifty_df[['sid','div_type','timestamp']]
# When printed: pandas DataFrames display the head(30) and tail(30) rows, and truncate the middle.
reduced
# Import necessary Pipeline modules
from quantopian.pipeline import Pipeline
from quantopian.research import run_pipeline
from quantopian.pipeline.factors import AverageDollarVolume
# Import the datasets available
from quantopian.pipeline.data.eventvestor import (
DividendsByExDate,
DividendsByPayDate,
DividendsByAnnouncementDate,
)
from quantopian.pipeline.factors.eventvestor import (
BusinessDaysSincePreviousExDate,
BusinessDaysUntilNextExDate,
BusinessDaysSincePreviousPayDate,
BusinessDaysUntilNextPayDate,
BusinessDaysSinceDividendAnnouncement,
)
print "Here are the list of available fields per dataset:"
print "---------------------------------------------------\n"
def _print_fields(dataset):
print "Dataset: %s\n" % dataset.__name__
print "Fields:"
for field in list(dataset.columns):
print "%s - %s" % (field.name, field.dtype)
print "\n"
for data in (DividendsByExDate, DividendsByPayDate, DividendsByAnnouncementDate):
_print_fields(data)
print "---------------------------------------------------\n"
# Let's see what this data looks like when we run it through Pipeline
# This is constructed the same way as you would in the backtester. For more information
# on using Pipeline in Research view this thread:
# https://www.quantopian.com/posts/pipeline-in-research-build-test-and-visualize-your-factors-and-filters
pipe = Pipeline()
pipe.add(DividendsByExDate.next_date.latest, 'next_ex_date')
pipe.add(DividendsByExDate.previous_date.latest, 'prev_ex_date')
pipe.add(DividendsByExDate.next_amount.latest, 'next_amount')
pipe.add(DividendsByExDate.previous_amount.latest, 'prev_amount')
pipe.add(DividendsByExDate.next_currency.latest, 'next_currency')
pipe.add(DividendsByExDate.previous_currency.latest, 'prev_currency')
pipe.add(DividendsByExDate.next_type.latest, 'next_type')
pipe.add(DividendsByExDate.previous_type.latest, 'prev_type')
# Setting some basic liquidity strings (just for good habit)
dollar_volume = AverageDollarVolume(window_length=20)
top_1000_most_liquid = dollar_volume.rank(ascending=False) < 1000
pipe.set_screen(top_1000_most_liquid & DividendsByExDate.previous_amount.latest.notnan())
# The show_graph() method of pipeline objects produces a graph to show how it is being calculated.
pipe.show_graph(format='png')
# run_pipeline will show the output of your pipeline
pipe_output = run_pipeline(pipe, start_date='2013-11-01', end_date='2013-11-25')
pipe_output
# This section is only importable in the backtester
from quantopian.algorithm import attach_pipeline, pipeline_output
# General pipeline imports
from quantopian.pipeline import Pipeline
from quantopian.pipeline.factors import AverageDollarVolume
# Import the datasets available
from quantopian.pipeline.data.eventvestor import (
DividendsByExDate,
DividendsByPayDate,
DividendsByAnnouncementDate,
)
from quantopian.pipeline.factors.eventvestor import (
BusinessDaysSincePreviousExDate,
BusinessDaysUntilNextExDate,
BusinessDaysSinceDividendAnnouncement,
)
def make_pipeline():
    """Construct the dividends-by-ex-date pipeline used by this algorithm.

    Screens down to the ~1000 most liquid names (by 20-day average dollar
    volume) and exposes the next/previous ex-dividend fields plus the
    business-day countdown to the next ex-date.
    """
    pipeline = Pipeline()

    # Liquidity screen: top ~1000 securities by 20-day average dollar volume
    # (filters out penny stocks and thinly traded names).
    adv = AverageDollarVolume(window_length=20)
    is_liquid = adv.rank(ascending=False) < 1000
    # Mask kept for parity with the research-notebook version of this cell.
    base_universe = (is_liquid)

    # Factor columns to expose, in the original order.
    factor_columns = [
        (DividendsByExDate.next_date.latest, 'next_ex_date'),
        (DividendsByExDate.previous_date.latest, 'prev_ex_date'),
        (DividendsByExDate.next_amount.latest, 'next_amount'),
        (DividendsByExDate.previous_amount.latest, 'prev_amount'),
        (DividendsByExDate.next_currency.latest, 'next_currency'),
        (DividendsByExDate.previous_currency.latest, 'prev_currency'),
        (DividendsByExDate.next_type.latest, 'next_type'),
        (DividendsByExDate.previous_type.latest, 'prev_type'),
        (BusinessDaysUntilNextExDate(), 'business_days'),
    ]
    for factor, column_name in factor_columns:
        pipeline.add(factor, column_name)

    pipeline.set_screen(is_liquid)
    return pipeline
def initialize(context):
    """Called once at algorithm start: register the dividends pipeline."""
    attach_pipeline(make_pipeline(), "pipeline")
def before_trading_start(context, data):
    """Runs before each session: pull today's pipeline rows into a DataFrame."""
    # NOTE(review): `results` is unused here -- presumably later cells store it
    # on `context` (e.g. context.output = results); confirm downstream usage.
    results = pipeline_output('pipeline')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simulation of Sequence of Coins
Step2: Discussing Probability of Sequences
Step3: Again
|
<ASSISTANT_TASK:>
Python Code:
# importing
import numpy as np
from scipy import stats, special
from decimal import Decimal
import matplotlib.pyplot as plt
import matplotlib
# showing figures inline
%matplotlib inline
# plotting options
font = {'size' : 20}
plt.rc('font', **font)
plt.rc('text', usetex=True)
matplotlib.rc('figure', figsize=(18, 6) )
# number of points to be sampled
len_sequence = 1000
N_sequences = 1000
# initialize storage for sequences
results = np.zeros( ( N_sequences, len_sequence ) )
# vector of lengths from 1 to len_sequence in order to perform averaging
lengths = np.arange( 1, len_sequence + 1 )
# loop for sequence length
for n in np.arange( N_sequences ):
# sample sequence
sequence = np.random.choice( [ 0 , 1 ], size = len_sequence, p = [ .5, .5 ] )
# summing up and normalizing
# NOTE: By doing so, occurences are always normalized to the length of the observation
results[ n, :] = np.cumsum( sequence ) / lengths
# plotting
for n in np.arange( N_sequences ):
plt.plot( range(1, len_sequence+1), results[n, :], linewidth = 2.0 )
plt.grid( True )
plt.xlabel('$N$')
plt.ylabel('$H_N($'+'Kopf'+'$)$')
plt.margins(.1)
# number of samples
N = 100
# probability for sampling 1
p = 0.9
print( 'Model:' )
print( '------\n' )
print( '{} times indenpendently sampling a bit'.format( N ) )
print( 'P( 1 ) = {}\n\n'.format( p ) )
print( 'Results:' )
print( '--------\n' )
print( 'P( 11...11 ) \t\t\t = {}\n'.format( p**N ) )
print( 'P( 10 x 0, 90 x 1 ) \t\t = {}'.format( (1-p)**10 * p**(N-10) ) )
print( 'Binomial coefficient (N, 10) \t = {:.2e}'.format( Decimal( special.binom( N, 10 ) ) ) )
print( '''P( 10 x 0 'somewhere' ) \t = {}'''.format( special.binom( N, 10 ) * (1-p)**10 * p**(N-10) ) )
# number of points to be sampled
len_sequence = 10
N_sequences = 2000
# initialize storage for sequences
results = np.zeros( ( N_sequences, len_sequence ) )
# vector of lengths in order to perform averaging
lengths = np.arange( 1, len_sequence + 1 )
# loop for sequence length
for n in np.arange( N_sequences ):
# sample sequence
sequence = np.random.choice( [ 0 , 1 ], size = len_sequence, p = [ .5, .5 ] )
# summing up and normalizing
results[ n, :] = np.cumsum( sequence ) / lengths
# plotting
for n in np.arange( N_sequences ):
plt.plot( range(1, len_sequence+1), results[n, :], linewidth = 2.0 )
plt.grid( True )
plt.xlabel('$N$')
plt.ylabel('$H_N($'+'Kopf'+'$)$')
plt.margins(.1)
# now determine histogram for the occurrence of end points,
# showing that all points may be observed but likelihood is very different
# extract end-points
results_end = results[:, -1]
# get histogram
num_bins = 20
# NOTE(review): under Python 2, 2/num_bins is integer division and yields 0,
# collapsing the bar thickness below -- use 2.0/num_bins (or
# "from __future__ import division") if this notebook must run on Python 2.
width = 2/num_bins
# num_bins edge positions => num_bins - 1 histogram bins.
bins = np.linspace(0, 1, num_bins, endpoint=True)
r_hist = np.histogram( results_end, bins = bins, density = True )
# Horizontal bars anchored just right of the trajectories (x = len_sequence + 0.1),
# scaled to relative frequency * 5 for visibility; `width` is the bar thickness.
plt.barh( r_hist[1][:-1], 0 + r_hist[0] / np.sum(r_hist[0]) * 5 , width, left=len_sequence+.1, color = '#ff7f0e' )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First reload the data we generated in 1_notmnist.ipynb.
Step2: Reformat into a shape that's more adapted to the models we're going to train
Step3: We're first going to train a multinomial logistic regression using simple gradient descent.
Step4: Let's run this computation and iterate
Step5: Let's now switch to stochastic gradient descent training instead, which is much faster.
Step6: Let's run it
|
<ASSISTANT_TASK:>
Python Code:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
image_size = 28
num_labels = 10

def reformat(dataset, labels):
    """Flatten 28x28 images into 784-vectors and one-hot encode the labels.

    Returns a pair of float32 arrays: (n, 784) data and (n, 10) labels.
    """
    flat = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
    # Row i gets a 1.0 in column labels[i], zeros elsewhere.
    one_hot = np.equal.outer(labels, np.arange(num_labels)).astype(np.float32)
    return flat, one_hot
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# With gradient descent training, even this much data is prohibitive.
# Subset the training data for faster turnaround.
train_subset = 10000
graph = tf.Graph()
with graph.as_default():
# Input data.
# Load the training, validation and test data into constants that are
# attached to the graph.
tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
tf_train_labels = tf.constant(train_labels[:train_subset])
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
# These are the parameters that we are going to be training. The weight
# matrix will be initialized using random valued following a (truncated)
# normal distribution. The biases get initialized to zero.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
# We multiply the inputs with the weight matrix, and add biases. We compute
# the softmax and cross-entropy (it's one operation in TensorFlow, because
# it's very common, and it can be optimized). We take the average of this
# cross-entropy across all training examples: that's our loss.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
# We are going to find the minimum of this loss using gradient descent.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
# These are not part of training, but merely here so that we can report
# accuracy figures as we train.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 801
def accuracy(predictions, labels):
    """Percent of rows whose predicted class (argmax) matches the label's."""
    matches = np.argmax(predictions, 1) == np.argmax(labels, 1)
    return 100.0 * np.count_nonzero(matches) / predictions.shape[0]
with tf.Session(graph=graph) as session:
# This is a one-time operation which ensures the parameters get initialized as
# we described in the graph: random weights for the matrix, zeros for the
# biases.
tf.initialize_all_variables().run()
print('Initialized')
for step in range(num_steps):
# Run the computations. We tell .run() that we want to run the optimizer,
# and get the loss value and the training predictions returned as numpy
# arrays.
_, l, predictions = session.run([optimizer, loss, train_prediction])
if (step % 100 == 0):
print('Loss at step %d: %f' % (step, l))
print('Training accuracy: %.1f%%' % accuracy(
predictions, train_labels[:train_subset, :]))
# Calling .eval() on valid_prediction is basically like calling run(), but
# just to get that one numpy array. Note that it recomputes all its graph
# dependencies.
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
batch_size = 128

graph = tf.Graph()
with graph.as_default():

  # Input data. For the training data, we use a placeholder that will be fed
  # at run time with a training minibatch.
  tf_train_dataset = tf.placeholder(tf.float32,
                                    shape=(batch_size, image_size * image_size))
  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
  tf_valid_dataset = tf.constant(valid_dataset)
  tf_test_dataset = tf.constant(test_dataset)

  # Variables: a single affine layer (multinomial logistic regression).
  weights = tf.Variable(
    tf.truncated_normal([image_size * image_size, num_labels]))
  biases = tf.Variable(tf.zeros([num_labels]))

  # Training computation: softmax cross-entropy averaged over the minibatch.
  logits = tf.matmul(tf_train_dataset, weights) + biases
  loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))

  # Optimizer.
  optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

  # Predictions for the training, validation, and test data.
  train_prediction = tf.nn.softmax(logits)
  valid_prediction = tf.nn.softmax(
    tf.matmul(tf_valid_dataset, weights) + biases)
  # BUG FIX: this previously computed softmax(1024 * relu(logits)), which
  # saturates the softmax and is inconsistent with the train/valid
  # predictions and with the gradient-descent graph earlier in the file.
  # Test accuracy must be measured with the same plain softmax.
  test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, we can create a micromagnetic system object.
Step2: Let us assume we have a simple Hamiltonian which consists of only Zeeman energy term
Step3: In the next step we can define the system's dynamics. Let us assume we have $\gamma_{0} = 2.211 \times 10^{5} \,\text{m}\,\text{A}^{-1}\,\text{s}^{-1}$ and $\alpha=0.1$.
Step4: To check what is our dynamics equation
Step5: Before we start running time evolution simulations, we need to initialise the magnetisation. In this case, our magnetisation is pointing in the positive $x$ direction with $M_\text{s} = 8 \times 10^{6} \,\text{A}\,\text{m}^{-1}$. The magnetisation is defined using Field class from the discretisedfield package we imported earlier.
Step6: Now, we can run the time evolution using TimeDriver for $t=0.1 \,\text{ns}$ and save the magnetisation configuration in $n=200$ steps.
Step7: How different system parameters vary with time, we can inspect by showing the system's datatable.
Step8: However, in our case it is much more informative if we plot the time evolution of magnetisation $z$ component $m_{z}(t)$.
Step9: Similarly, we can plot all three magnetisation components
Step10: We can see that after some time the macrospin aligns parallel to the external magnetic field in the $z$ direction. We can explore the effect of Gilbert damping $\alpha = 0.2$ on the magnetisation dynamics.
|
<ASSISTANT_TASK:>
Python Code:
import oommfc as oc
import discretisedfield as df
%matplotlib inline
# Define macro spin mesh (i.e. one discretisation cell).
p1 = (0, 0, 0) # first point of the mesh domain (m)
p2 = (1e-9, 1e-9, 1e-9) # second point of the mesh domain (m)
cell = (1e-9, 1e-9, 1e-9) # discretisation cell size (m)
# Cell size equals the domain size, so the mesh holds exactly one cell: a macrospin.
mesh = oc.Mesh(p1=p1, p2=p2, cell=cell)
system = oc.System(name="macrospin")
H = (0, 0, 2e6) # external magnetic field (A/m)
# Hamiltonian consists of the Zeeman energy term only.
system.hamiltonian = oc.Zeeman(H=H)
gamma = 2.211e5 # gyromagnetic ratio (m/As)
alpha = 0.1 # Gilbert damping
# LLG dynamics: precession around H plus Gilbert damping.
system.dynamics = oc.Precession(gamma=gamma) + oc.Damping(alpha=alpha)
system.dynamics
initial_m = (1, 0, 0) # vector in x direction
Ms = 8e6 # magnetisation saturation (A/m)
# Uniform magnetisation along +x, normalised to |m| = Ms.
system.m = df.Field(mesh, value=initial_m, norm=Ms)
# Evolve for 0.1 ns, saving the configuration at 200 steps.
td = oc.TimeDriver()
td.drive(system, t=0.1e-9, n=200)
system.dt
system.dt.plot("t", "mz");
system.dt.plot("t", ["mx", "my", "mz"]);
# Repeat with stronger damping (alpha = 0.2); m is re-initialised so the
# second run also starts from +x rather than from the relaxed state above.
system.dynamics.damping.alpha = 0.2
system.m = df.Field(mesh, value=initial_m, norm=Ms)
td.drive(system, t=0.1e-9, n=200)
system.dt.plot("t", ["mx", "my", "mz"]);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: POS TAG Frequencies
Step2: tweet length
Step3: Learn from the extracted features from here on
|
<ASSISTANT_TASK:>
Python Code:
# POS Tag frequencies
from nltk.tag import pos_tag_sents
all_pos_tags = [pos_tag_sents(pos_tokenize(tokens)) for tokens in cb_feat_postText]
tag_list = []
for tweets in all_pos_tags:
tweet_tokens=""
for elements in tweets:
tweet_tokens += elements[0][1] + " "
tag_list.append(tweet_tokens)
pos_tag_cv = CountVectorizer(ngram_range=(2, 5))
cb_feat_pos_ngrams = pos_tag_cv.fit_transform(tag_list)
print(cb_feat_pos_ngrams.shape) # 2459, 2649
from collections import OrderedDict
# Build a fixed-length POS-tag count vector per tweet (one column per tag).
# NOTE(review): OrderedDict({...}) built from a dict literal does not fix the
# key order on Python < 3.7 (and string hashing is randomized per run), so the
# column order of the feature matrix is only stable within a single run --
# confirm, or pass a list of (key, value) pairs to OrderedDict instead.
# NOTE(review): any tag missing from this hard-coded set (e.g. '.', ',', '$',
# 'POS', 'RP', 'PDT') raises KeyError in the += below -- confirm the tagger
# can never emit them on this corpus.
tag_frequency_list = []
for tweets in all_pos_tags:
    tag_freq=OrderedDict({'NN': 0, 'NNS': 0, 'VB': 0, 'IN': 0, 'VBG': 0, 'RB': 0, 'JJ': 0, 'NNP': 0, 'JJS': 0,
                          'DT': 0, 'CC': 0, 'VBD': 0, 'PRP$': 0, 'MD': 0, 'PRP': 0, 'CD': 0, 'TO': 0, 'VBN': 0,
                          'WP': 0, 'VBZ': 0, 'JJR': 0, 'WRB': 0, 'VBP': 0, 'WDT': 0, 'RBR': 0, 'EX': 0, 'RBS': 0,
                          'WP$': 0, ':': 0, 'NNPS': 0, 'UH': 0, 'SYM': 0, 'FW': 0, 'LS': 0})
    for elements in tweets:
        tag_freq[str(elements[0][1])] += 1
    tag_frequency_list.append(list(tag_freq.values()))
cb_feat_pos_frequency = numpy.asarray(tag_frequency_list)
# TODO NER on tweets is not trivial, dont use this feature jet
from nltk.tag import StanfordNERTagger
st = StanfordNERTagger('/home/mike4537/opt/stanford-ner/classifiers/english.all.3class.distsim.crf.ser.gz',
'/home/mike4537/opt/stanford-ner/stanford-ner.jar',
encoding='utf-8')
text = 'While in France, Christine Lagarde discussed short-term stimulus efforts in a recent interview with the Wall Street Journal.'
tokenized_text = tokenize(cb_feat_postText[0])
classified_text = st.tag(tokenized_text)
print(classified_text)
from nltk import ne_chunk, pos_tag
from nltk.chunk import tree2conlltags
# how does this work ????
for tweet in cb_feat_postText:
chunk_tree = ne_chunk(pos_tag([x for x in preprocess_tokenize(tweet)]))
iob = tree2conlltags(chunk_tree)
for tag in iob:
if tag[2] != 'O':
print(tag)
# Word 1-grams matrix
'''
input=u'content', strip_accents=None, ngram_range=(1, 1) -> all ngrams between (inclusive)
analyzer=u'word' -> {‘word’, ‘char’, ‘char_wb’}
preprocessor=None, tokenizer=None, stop_words=None, token_pattern=u'(?u)\b\w\w+\b',
'''
# there is a difference in feature dimension(100) if i use my tokenizer vs. the build in one??????
count_vectorizer = CountVectorizer(preprocessor=preprocess, tokenizer=tokenize, ngram_range=(1, 5))
cb_feat_textwordonegram = count_vectorizer.fit_transform(cb_feat_postText)
print(cb_feat_textwordonegram.shape) # 2459, 8095
cb_feat_wordlength = []
for tweet in cb_feat_postText:
cb_feat_wordlength.append(len([x for x in tokenize(tweet)]))
cb_feat_wordlength = numpy.asarray(cb_feat_wordlength)
print(cb_feat_wordlength)
from scipy.sparse import hstack
cb_feat_all = hstack((cb_feat_textwordonegram, cb_feat_pos_frequency))
cb_feat_all = hstack((cb_feat_all, cb_feat_wordlength[:, numpy.newaxis]))
cb_feat_all = hstack((cb_feat_all, cb_feat_pos_ngrams))
print(cb_feat_all.shape)
from sklearn.model_selection import train_test_split
# split x,y dataset in train/test:
x_train, x_test, y_train, y_test = train_test_split(cb_feat_all, cb_scores.T, random_state=42)
y_test_random1 = numpy.full((len(y_test)), 0.5)
y_test_random2 = numpy.full((len(y_test)), 0.5)
# print(y_test_random1)
# model evaluation
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
from sklearn import svm
# model = RandomForestRegressor()
model = svm.SVR()
model.fit(x_train, y_train)
y_predicted = model.predict(x_test)
# NOTE: mean square is bullshit if values are < 1
# cross entropy für differenz zwischen verteilungen
# score = cross_val_score(model, x_train, y_train, cv=5)
# print("cross_val " + str(score.mean()) + " +- " + str(score.std()))
print("Explained variance score: " + str(explained_variance_score(y_test, y_predicted))) # lower means: accounts less for variance in data
print("Mean squared error: " + str(mean_squared_error(y_test, y_predicted))) # how close does the model get on average
print("Mean absolute error: " + str(mean_absolute_error(y_test, y_predicted))) # are we biased?
print("Median absolute error: " + str(median_absolute_error(y_test, y_predicted))) # outliers?
print("R^2: " + str(r2_score(y_test, y_predicted))) # future predictions
print("Mean squared error to random1: " + str(mean_squared_error(y_test, y_test_random1))) # how close does the model get on average
print("Mean absolute error to random1: " + str(mean_absolute_error(y_test, y_test_random1))) # are we biased?
fig = plt.figure()
tmp = fig.add_subplot(3, 1, 1)
tmp = plt.plot(y_test)
tmp = fig.add_subplot(3, 1, 2)
tmp = plt.plot(y_predicted)
tmp = fig.add_subplot(3, 1, 3)
tmp = plt.plot([b - a for (a, b) in zip(y_predicted, y_test)])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate data
Step2: Plot the data and the model
Step3: Maximize log-likelihood
Step4: Plot objective function, true parameter, and the estimate
Step5: Solve first order conditions
Step6: Plot first order condition
Step7: Plot original data and fitted mean
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pylab as plt
import seaborn as sns
np.set_printoptions(precision=4, suppress=True)
sns.set_context('notebook')
%matplotlib inline
# True parameter
theta = .5
# Sample size
n = int(1e2)
# Independent variable, N(0,1)
X = np.random.normal(0, 1, n)
# Sort data for nice plots
X = np.sort(X)
# Poisson mean (rate): mu_i = exp(theta * x_i)
mu = np.exp(X * theta)
# Response: Poisson counts with mean mu.  (The original "Error term, N(0,1)"
# label was wrong -- np.random.poisson draws counts, not Gaussian noise.)
Y = np.random.poisson(mu, n)
plt.figure(figsize = (8, 8))
plt.scatter(X, Y, label='Observed data')
plt.ylabel(r'$Y$')
plt.xlabel(r'$X$')
plt.show()
import scipy.optimize as opt
from scipy.stats import poisson
# Define objective function
def f(theta, X, Y):
    """Smoothed negative Poisson log-likelihood for mean exp(X * theta).

    The 1e-3 offset keeps log() finite when a pmf value underflows to zero.
    """
    rate = np.exp(X * theta)
    likelihoods = poisson.pmf(Y, rate) + 1e-3
    return -np.log(likelihoods).sum()
# Run optimization routine
theta_hat = opt.fmin_bfgs(f, 0., args=(X, Y))
print(theta_hat)
# Generate data for the objective-function plot.
# BUG FIX: np.linspace's `num` argument must be an integer -- passing the
# float 1e2 raises TypeError on NumPy >= 1.18 (it was only deprecated before).
th = np.linspace(-3.0, 3.0, 100)
Q = [f(z, X, Y) for z in th]
# Plot the objective together with the true and estimated parameters.
plt.figure(figsize=(8, 4))
plt.plot(th, Q, label='Q')
plt.xlabel(r'$\theta$')
plt.axvline(x=theta_hat, c='red', label='Estimated')
plt.axvline(x=theta, c='black', label='True')
plt.legend()
plt.show()
from scipy.optimize import fsolve
# Define the first order condition
def df(theta, X, Y):
    """Negative score (first-order condition) of the Poisson log-likelihood."""
    residuals = Y - np.exp(X * theta)
    return -(X * residuals).sum()
# Solve FOC
theta_hat = fsolve(df, 0., args=(X, Y))
print(theta_hat)
# Generate data for the first-order-condition plot.
# BUG FIX: np.linspace's `num` must be an integer (the float 1e2 raises
# TypeError on NumPy >= 1.18).
th = np.linspace(-3.0, 3.0, 100)
Q = np.array([df(z, X, Y) for z in th])
# Plot the score function; its zero crossing is the MLE.
plt.figure(figsize=(8, 4))
plt.plot(th, Q, label='Q')
# BUG FIX: the x-axis variable is theta, not beta (consistent with the
# objective-function plot above).
plt.xlabel(r'$\theta$')
plt.axvline(x=theta_hat, c='red', label='Estimated')
plt.axvline(x=theta, c='black', label='True')
plt.axhline(y=0, c='green')
plt.legend()
plt.show()
plt.figure(figsize=(8, 8))
plt.scatter(X, Y, label='Observed data')
plt.plot(X, np.exp(X * theta_hat), label='Fitted mean')
plt.ylabel(r'$Y$')
plt.xlabel(r'$X$')
plt.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unit Test
|
<ASSISTANT_TASK:>
Python Code:
%run ../bst/bst.py
%load ../bst/bst.py
def check_balance(root):
    """Return True iff the binary tree rooted at `root` is height-balanced.

    Balanced: at every node, the heights of the left and right subtrees
    differ by at most one.  An empty tree (root is None) is balanced.
    This implements the TODO stub so the unit test below passes.

    NOTE(review): assumes nodes expose `.left`/`.right` attributes -- this
    matches the Node class loaded from ../bst/bst.py; confirm if that
    module's interface changes.
    """
    def _height_or_fail(node):
        # Subtree height, or -1 as a sentinel once any imbalance is found
        # (lets us short-circuit instead of re-measuring heights).
        if node is None:
            return 0
        left = _height_or_fail(node.left)
        if left == -1:
            return -1
        right = _height_or_fail(node.right)
        if right == -1:
            return -1
        if abs(left - right) > 1:
            return -1
        return 1 + max(left, right)

    return _height_or_fail(root) != -1
# %load test_check_balance.py
from nose.tools import assert_equal
class TestCheckBalance(object):
    """Exercise check_balance() on one balanced and one unbalanced BST."""

    def test_check_balance(self):
        # Balanced: 5 with children 3 and 8; 3 with children 1 and 4.
        node = Node(5)
        insert(node, 3)
        insert(node, 8)
        insert(node, 1)
        insert(node, 4)
        assert_equal(check_balance(node), True)
        # Unbalanced: 8 -> 9 -> 10 forms a right chain under the root 5.
        node = Node(5)
        insert(node, 3)
        insert(node, 8)
        insert(node, 9)
        insert(node, 10)
        assert_equal(check_balance(node), False)
        print('Success: test_check_balance')
def main():
test = TestCheckBalance()
test.test_check_balance()
if __name__ == '__main__':
main()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We use <span style="font-variant
Step2: The annotated grammar is stored in the file Grammar.g4.
Step3: We start by generating both scanner and parser.
Step4: The Class GrammarRule
Step5: The function parse_grammar takes a string filename as its argument and returns the grammar that is stored in the specified file. The grammar is represented as list of rules. Each rule is represented as a tuple. The example below will clarify this structure.
Step6: Given a string name, which is either a variable, a token, or a literal, the function is_var checks whether name is a variable. The function can distinguish variable names from tokens and literals because variable names consist only of lower case letters, while tokens are all uppercase and literals start with the character "'".
Step7: Fun Fact
Step8: Given a list Rules of GrammarRules, the function collect_variables(Rules) returns the set of all variables occuring in Rules.
Step9: Given a set Rules of GrammarRules, the function collect_tokens(Rules) returns the set of all tokens and literals occuring in Rules.
Step10: Marked Rules
Step11: Given a marked rule self, the function is_complete checks, whether the marked rule self has the form
Step12: Given a marked rule self of the form
Step13: Given a marked rule, this function returns the variable following the dot. If there is no variable following the dot, the function returns None.
Step14: The function move_dot(self) transforms a marked rule of the form
Step15: The function to_rule(self) turns the marked rule self into a GrammarRule, i.e. the marked rule
Step16: SLR-Table-Generation
Step17: Given a set of Variables, the function initialize_dictionary returns a dictionary that assigns the empty set to all variables.
Step18: Given a Grammar, the function compute_tables computes
Step19: The function compute_rule_names assigns a unique name to each rule of the grammar. These names are used later
Step20: The function compute_first(self) computes the sets $\texttt{First}(c)$ for all variables $c$ and stores them in the dictionary mFirst. Abstractly, given a variable $c$ the function $\texttt{First}(c)$ is the set of all tokens that can start a string that is derived from $c$
Step21: Given a tuple of variables and tokens alpha, the function first_list(alpha) computes the function $\texttt{FirstList}(\alpha)$ that has been defined above. If alpha is nullable, then the result will contain the empty string $\varepsilon = \texttt{''}$.
Step22: The arguments S and T of eps_union are sets that contain tokens and, additionally, they might contain the empty string.
Step23: Given an augmented grammar $G = \langle V,T,R\cup{\widehat{s} \rightarrow s\,\$}, \widehat{s}\rangle$
Step24: If $\mathcal{M}$ is a set of marked rules, then the closure of $\mathcal{M}$ is the smallest set $\mathcal{K}$ such that
Step25: Given a set of marked rules $\mathcal{M}$ and a grammar symbol $X$, the function $\texttt{goto}(\mathcal{M}, X)$
Step26: The function all_states computes the set of all states of an SLR-parser. The function starts with the state
Step27: The following function computes the action table and is defined as follows
Step28: The function compute_goto_table computes the goto table.
|
<ASSISTANT_TASK:>
Python Code:
!cat Examples/c-grammar.g
!cat Pure.g4
!cat -n Grammar.g4
!antlr4 -Dlanguage=Python3 Grammar.g4
from GrammarLexer import GrammarLexer
from GrammarParser import GrammarParser
import antlr4
class GrammarRule:
    """A context-free grammar rule `variable → body`.

    `mVariable` is the head (a string); `mBody` is a tuple of grammar
    symbols (variables, tokens, or quoted literals).
    """

    def __init__(self, variable, body):
        self.mVariable = variable
        self.mBody = body

    def __repr__(self):
        body_text = " ".join(self.mBody)
        return f'{self.mVariable} → {body_text}'

    def __eq__(self, other):
        if not isinstance(other, GrammarRule):
            return False
        return (self.mVariable, self.mBody) == (other.mVariable, other.mBody)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Equal rules produce equal reprs, so hashing the repr is
        # consistent with __eq__.
        return hash(self.__repr__())
def parse_grammar(filename):
input_stream = antlr4.FileStream(filename, encoding="utf-8")
lexer = GrammarLexer(input_stream)
token_stream = antlr4.CommonTokenStream(lexer)
parser = GrammarParser(token_stream)
grammar = parser.start()
return [GrammarRule(head, tuple(body)) for head, *body in grammar.g]
grammar = parse_grammar('Examples/c-grammar.g')
grammar
def is_var(name):
    """Return True iff `name` denotes a grammar variable.

    Variables consist of lower-case letters only; tokens are all upper
    case and literals start with a single quote, so both fail this test.
    """
    first = name[0]
    return first != "'" and name.islower()
"'return'".islower()
def collect_variables(Rules):
    """Return the set of all grammar variables occurring in `Rules`."""
    Variables = set()
    for rule in Rules:
        Variables.add(rule.mVariable)
        Variables.update(sym for sym in rule.mBody if is_var(sym))
    return Variables
def collect_tokens(Rules):
    """Return the set of all tokens and literals occurring in `Rules`."""
    Tokens = set()
    for rule in Rules:
        Tokens.update(sym for sym in rule.mBody if not is_var(sym))
    return Tokens
class MarkedRule():
    """A grammar rule with a parser position marker (the dot `•`).

    `mAlpha` holds the symbols already recognised (left of the dot) and
    `mBeta` the symbols still expected (right of the dot); both tuples.
    """

    def __init__(self, variable, alpha, beta):
        self.mVariable = variable
        self.mAlpha = alpha
        self.mBeta = beta

    def __repr__(self):
        left = ' '.join(self.mAlpha)
        right = ' '.join(self.mBeta)
        return f'{self.mVariable} → {left} • {right}'

    def __eq__(self, other):
        if not isinstance(other, MarkedRule):
            return False
        return (self.mVariable, self.mAlpha, self.mBeta) == \
               (other.mVariable, other.mAlpha, other.mBeta)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # The repr uniquely determines a marked rule, so this agrees
        # with __eq__.
        return hash(self.__repr__())
def is_complete(self):
return len(self.mBeta) == 0
MarkedRule.is_complete = is_complete
del is_complete
def symbol_after_dot(self):
if len(self.mBeta) > 0:
return self.mBeta[0]
return None
MarkedRule.symbol_after_dot = symbol_after_dot
del symbol_after_dot
def next_var(self):
if len(self.mBeta) > 0:
var = self.mBeta[0]
if is_var(var):
return var
return None
MarkedRule.next_var = next_var
del next_var
def move_dot(self):
return MarkedRule(self.mVariable,
self.mAlpha + (self.mBeta[0],),
self.mBeta[1:])
MarkedRule.move_dot = move_dot
del move_dot
def to_rule(self):
return GrammarRule(self.mVariable, self.mAlpha + self.mBeta)
MarkedRule.to_rule = to_rule
del to_rule
class Grammar():
    """An SLR(1) grammar: on construction, augments the rule set with
    `ŝ → start $` and immediately computes FIRST/FOLLOW sets, the state
    machine, and the action/goto tables (printing them as it goes).
    """
    def __init__(self, Rules):
        # mRules: list of GrammarRule; mStart: head of the first rule.
        self.mRules = Rules
        self.mStart = Rules[0].mVariable
        self.mVariables = collect_variables(Rules)
        self.mTokens = collect_tokens(Rules)
        # mStates / mStateNames are filled in by all_states().
        self.mStates = set()
        self.mStateNames = {}
        # Set to True by compute_action_table() on shift/reduce or
        # reduce/reduce conflicts.
        self.mConflicts = False
        self.mVariables.add('ŝ')
        self.mTokens.add('$')
        self.mRules.append(GrammarRule('ŝ', (self.mStart, '$'))) # augmenting
        self.compute_tables()
def initialize_dictionary(Variables):
    """Map every variable in `Variables` to its own fresh empty set."""
    return dict((variable, set()) for variable in Variables)
def compute_tables(self):
    """Drive the full SLR table construction, in dependency order:
    FIRST sets, FOLLOW sets, rule names, the LR(0) state machine,
    then the action and goto tables.
    """
    self.mFirst = initialize_dictionary(self.mVariables)
    self.mFollow = initialize_dictionary(self.mVariables)
    self.compute_first()
    self.compute_follow()
    self.compute_rule_names()
    self.all_states()
    self.compute_action_table()
    self.compute_goto_table()
Grammar.compute_tables = compute_tables
del compute_tables
def compute_rule_names(self):
    """Assign each rule a unique name 'r0', 'r1', ... in rule order.

    These names are written into the dumped parse table so the driver
    can refer to rules compactly.
    """
    self.mRuleNames = {}
    counter = 0
    for rule in self.mRules:
        self.mRuleNames[rule] = 'r' + str(counter)
        counter += 1
Grammar.compute_rule_names = compute_rule_names
del compute_rule_names
def compute_first(self):
    """Compute FIRST(c) for every variable c into self.mFirst.

    Fixed-point iteration: repeatedly fold FIRST of each rule body into
    FIRST of its head until no set grows.  FIRST sets may contain ''
    (epsilon) for nullable variables.  Prints the result.
    """
    change = True
    while change:
        change = False
        for rule in self.mRules:
            a, body = rule.mVariable, rule.mBody
            first_body = self.first_list(body)
            # Only mark a change when the rule contributes new tokens.
            if not (first_body <= self.mFirst[a]):
                change = True
                self.mFirst[a] |= first_body
    print('First sets:')
    for v in self.mVariables:
        print(f'First({v}) = {self.mFirst[v]}')
Grammar.compute_first = compute_first
del compute_first
def first_list(self, alpha):
    """Return FirstList(alpha) for a tuple `alpha` of grammar symbols.

    The empty tuple yields {''} (epsilon); a leading token yields just
    that token; a leading variable combines its FIRST set with the
    FIRST of the remaining symbols via eps_union.
    """
    if len(alpha) == 0:
        return { '' }
    elif is_var(alpha[0]):
        v, *r = alpha
        return eps_union(self.mFirst[v], self.first_list(r))
    else:
        t = alpha[0]
        return { t }
Grammar.first_list = first_list
del first_list
def eps_union(S, T):
    """Combine two FIRST-style sets that may contain '' (epsilon).

    Return S unchanged when S is epsilon-free; otherwise replace the
    epsilon contribution of S with T (epsilon survives only when both
    sets contain it).
    """
    if '' not in S:
        return S
    rest = S - {''}
    if '' in T:
        rest = rest | {''}
    return rest | T
def compute_follow(self):
    """Compute FOLLOW(c) for every variable c into self.mFollow.

    Seeds FOLLOW(start) with the end marker '$', then iterates to a
    fixed point: for each occurrence of a variable y_i in a rule body,
    FOLLOW(y_i) absorbs FIRST of the tail after it, and FOLLOW(head)
    when that tail is nullable.  Prints the result.
    """
    self.mFollow[self.mStart] = { '$' }
    change = True
    while change:
        change = False
        for rule in self.mRules:
            a, body = rule.mVariable, rule.mBody
            for i in range(len(body)):
                if is_var(body[i]):
                    yi = body[i]
                    Tail = self.first_list(body[i+1:])
                    # eps_union substitutes FOLLOW(a) for epsilon in Tail.
                    firstTail = eps_union(Tail, self.mFollow[a])
                    if not (firstTail <= self.mFollow[yi]):
                        change = True
                        self.mFollow[yi] |= firstTail
    print('Follow sets (note that "$" denotes the end of file):');
    for v in self.mVariables:
        print(f'Follow({v}) = {self.mFollow[v]}')
Grammar.compute_follow = compute_follow
del compute_follow
def cmp_closure(self, Marked_Rules):
    """Return the closure of a set of marked rules as a frozenset.

    For every rule with a variable c right after the dot, add the
    freshly-marked rules `c → • alpha` for all productions of c, and
    repeat until nothing new is found.
    NOTE(review): All_Rules starts as an alias of the caller's set and
    is mutated with |=; callers pass throwaway sets, so this is benign
    here.
    """
    All_Rules = Marked_Rules
    New_Rules = Marked_Rules
    while True:
        More_Rules = set()
        for rule in New_Rules:
            c = rule.next_var()
            if c == None:
                continue
            for rule in self.mRules:
                head, alpha = rule.mVariable, rule.mBody
                if c == head:
                    More_Rules |= { MarkedRule(head, (), alpha) }
        # Fixed point reached: no genuinely new marked rules.
        if More_Rules <= All_Rules:
            return frozenset(All_Rules)
        New_Rules = More_Rules - All_Rules
        All_Rules |= New_Rules
Grammar.cmp_closure = cmp_closure
del cmp_closure
def goto(self, Marked_Rules, x):
    """Return goto(M, x): the closure of all rules in `Marked_Rules`
    whose dot sits directly before symbol `x`, with the dot advanced
    over it.
    """
    Result = set()
    for mr in Marked_Rules:
        if mr.symbol_after_dot() == x:
            Result.add(mr.move_dot())
    return self.cmp_closure(Result)
Grammar.goto = goto
del goto
def all_states(self):
    """Compute every state of the SLR parser into self.mStates.

    Starts from the closure of `ŝ → • start $` and repeatedly applies
    goto() on every symbol after a dot (except the end marker '$')
    until no new state appears.  Names the states s0, s1, ... in
    self.mStateNames and prints them; s0 is always the start state.
    """
    start_state = self.cmp_closure({ MarkedRule('ŝ', (), (self.mStart, '$')) })
    self.mStates = { start_state }
    New_States = self.mStates
    while True:
        More_States = set()
        for Rule_Set in New_States:
            for mr in Rule_Set:
                if not mr.is_complete():
                    x = mr.symbol_after_dot()
                    # '$' is never shifted, so it spawns no state.
                    if x != '$':
                        More_States |= { self.goto(Rule_Set, x) }
        if More_States <= self.mStates:
            break
        New_States = More_States - self.mStates;
        self.mStates |= New_States
    print("All SLR-states:")
    counter = 1
    self.mStateNames[start_state] = 's0'
    print(f's0 = {set(start_state)}')
    for state in self.mStates - { start_state }:
        self.mStateNames[state] = f's{counter}'
        print(f's{counter} = {set(state)}')
        counter += 1
Grammar.all_states = all_states
del all_states
def compute_action_table(self):
    """Fill self.mActionTable with shift/reduce/accept actions.

    For every state: shift actions come from goto() on each token;
    reduce actions come from complete marked rules, keyed by the
    FOLLOW set of the rule head (the SLR criterion); the accept action
    fires on `ŝ → start • $`.  Conflicts are reported and recorded in
    self.mConflicts rather than resolved.  Prints the table.
    """
    self.mActionTable = {}
    print('\nAction Table:')
    for state in self.mStates:
        stateName = self.mStateNames[state]
        # Local per-state table, used only to detect conflicts.
        actionTable = {}
        # compute shift actions
        for token in self.mTokens:
            if token != '$':
                newState = self.goto(state, token)
                if newState != set():
                    newName = self.mStateNames[newState]
                    actionTable[token] = ('shift', newName)
                    self.mActionTable[stateName, token] = ('shift', newName)
                    print(f'action("{stateName}", {token}) = ("shift", {newName})')
        # compute reduce actions (SLR: only on tokens in FOLLOW(head))
        for mr in state:
            if mr.is_complete():
                for token in self.mFollow[mr.mVariable]:
                    action1 = actionTable.get(token)
                    action2 = ('reduce', mr.to_rule())
                    if action1 == None:
                        actionTable[token] = action2
                        # Store the rule by its short name (r0, r1, ...).
                        r = self.mRuleNames[mr.to_rule()]
                        self.mActionTable[stateName, token] = ('reduce', r)
                        print(f'action("{stateName}", {token}) = {action2}')
                    elif action1 != action2:
                        # Shift/reduce or reduce/reduce conflict: report it.
                        self.mConflicts = True
                        print('')
                        print(f'conflict in state {stateName}:')
                        print(f'{stateName} = {state}')
                        print(f'action("{stateName}", {token}) = {action1}')
                        print(f'action("{stateName}", {token}) = {action2}')
                        print('')
        for mr in state:
            if mr == MarkedRule('ŝ', (self.mStart,), ('$',)):
                actionTable['$'] = 'accept'
                self.mActionTable[stateName, '$'] = 'accept'
                print(f'action("{stateName}", $) = accept')
Grammar.compute_action_table = compute_action_table
del compute_action_table
def compute_goto_table(self):
    """Fill self.mGotoTable: for each (state, variable) pair with a
    non-empty goto(), map the pair to the name of the successor state.
    Prints the table.
    """
    self.mGotoTable = {}
    print('\nGoto Table:')
    for state in self.mStates:
        for var in self.mVariables:
            newState = self.goto(state, var)
            if newState != set():
                stateName = self.mStateNames[state]
                newName = self.mStateNames[newState]
                self.mGotoTable[stateName, var] = newName
                print(f'goto({stateName}, {var}) = {newName}')
Grammar.compute_goto_table = compute_goto_table
del compute_goto_table
%%time
g = Grammar(grammar)
def strip_quotes(t):
    """Remove a matching pair of enclosing single quotes from `t`.

    Returns `t` unchanged when it is not a quoted literal.  The length
    guard fixes two edge cases of the original: the empty string no
    longer raises IndexError, and the one-character string "'" is no
    longer reduced to "".
    """
    if len(t) >= 2 and t[0] == "'" and t[-1] == "'":
        return t[1:-1]
    return t
def dump_parse_table(self, file):
    """Write the grammar rules, action table, and goto table to `file`
    as executable Python assignments (r0 = ..., actionTable[...] = ...,
    gotoTable[...] = ...), suitable for a table-driven SLR parser to
    import.
    """
    with open(file, 'w') as handle:
        handle.write('# Grammar rules:\n')
        for rule in self.mRules:
            rule_name = self.mRuleNames[rule]
            handle.write(f'{rule_name} = ("{rule.mVariable}", {rule.mBody})\n')
        handle.write('\n# Action table:\n')
        handle.write('actionTable = {}\n')
        for s, t in self.mActionTable:
            action = self.mActionTable[s, t]
            # Literal tokens are stored quoted; unquote them for output.
            t = strip_quotes(t)
            if action[0] == 'reduce':
                rule_name = action[1]
                handle.write(f"actionTable['{s}', '{t}'] = ('reduce', {rule_name})\n")
            elif action == 'accept':
                handle.write(f"actionTable['{s}', '{t}'] = 'accept'\n")
            else:
                handle.write(f"actionTable['{s}', '{t}'] = {action}\n")
        handle.write('\n# Goto table:\n')
        handle.write('gotoTable = {}\n')
        for s, v in self.mGotoTable:
            state = self.mGotoTable[s, v]
            handle.write(f"gotoTable['{s}', '{v}'] = '{state}'\n")
Grammar.dump_parse_table = dump_parse_table
del dump_parse_table
g.dump_parse_table('parse-table.py')
!cat parse-table.py
!rm GrammarLexer.* GrammarParser.* Grammar.tokens GrammarListener.py Grammar.interp
!rm -r __pycache__
!ls
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setting Up S3 Access Using Boto
Step2: ERA5 Data Structure on S3
Step3: Let's take a look at the objects available for a specific month using boto's list_objects_v2 method.
Step4: Downloading Files
Step5: Now let's acquire data for a single variable over the course of a month. Let's download air temperature for August of 2017 and open the NetCDF file using xarray.
Step6: The ds.info output above shows us that there are three dimensions to the data
Step7: In the coordinate values, we can see that longitude is expressed as degrees east, ranging from 0 to 359.718 degrees. Latitude is expressed as degrees north, ranging from -89.784874 to 89.784874. And finally the time0 coordinate, ranging from 2017-08-01T07
Step8: Convert Units and Create a Dataframe
Step9: Show Me Some Charts!
|
<ASSISTANT_TASK:>
Python Code:
# Initialize notebook environment.
%matplotlib inline
import boto3
import botocore
import datetime
import matplotlib.pyplot as plt
import os.path
import xarray as xr
era5_bucket = 'era5-pds'
# AWS access / secret keys required
# s3 = boto3.resource('s3')
# bucket = s3.Bucket(era5_bucket)
# No AWS keys required
client = boto3.client('s3', config=botocore.client.Config(signature_version=botocore.UNSIGNED))
paginator = client.get_paginator('list_objects')
result = paginator.paginate(Bucket=era5_bucket, Delimiter='/')
for prefix in result.search('CommonPrefixes'):
print(prefix.get('Prefix'))
keys = []
date = datetime.date(2018,1,1) # update to desired date
prefix = date.strftime('%Y/%m/')
response = client.list_objects_v2(Bucket=era5_bucket, Prefix=prefix)
response_meta = response.get('ResponseMetadata')
if response_meta.get('HTTPStatusCode') == 200:
contents = response.get('Contents')
if contents == None:
print("No objects are available for %s" % date.strftime('%B, %Y'))
else:
for obj in contents:
keys.append(obj.get('Key'))
print("There are %s objects available for %s\n--" % (len(keys), date.strftime('%B, %Y')))
for k in keys:
print(k)
else:
print("There was an error with your request.")
metadata_file = 'main.nc'
metadata_key = prefix + metadata_file
client.download_file(era5_bucket, metadata_key, metadata_file)
ds_meta = xr.open_dataset('main.nc', decode_times=False)
ds_meta.info()
# select date and variable of interest
date = datetime.date(2017,8,1)
var = 'air_temperature_at_2_metres'
# file path patterns for remote S3 objects and corresponding local file
s3_data_ptrn = '{year}/{month}/data/{var}.nc'
data_file_ptrn = '{year}{month}_{var}.nc'
year = date.strftime('%Y')
month = date.strftime('%m')
s3_data_key = s3_data_ptrn.format(year=year, month=month, var=var)
data_file = data_file_ptrn.format(year=year, month=month, var=var)
if not os.path.isfile(data_file): # check if file already exists
print("Downloading %s from S3..." % s3_data_key)
client.download_file(era5_bucket, s3_data_key, data_file)
ds = xr.open_dataset(data_file)
ds.info
ds.coords.values()
# location coordinates
locs = [
{'name': 'santa_monica', 'lon': -118.496245, 'lat': 34.010341},
{'name': 'tallinn', 'lon': 24.753574, 'lat': 59.436962},
{'name': 'honolulu', 'lon': -157.835938, 'lat': 21.290014},
{'name': 'cape_town', 'lon': 18.423300, 'lat': -33.918861},
{'name': 'dubai', 'lon': 55.316666, 'lat': 25.266666},
]
# convert westward longitudes to degrees east
for l in locs:
if l['lon'] < 0:
l['lon'] = 360 + l['lon']
locs
ds_locs = xr.Dataset()
# interate through the locations and create a dataset
# containing the temperature values for each location
for l in locs:
name = l['name']
lon = l['lon']
lat = l['lat']
var_name = name
ds2 = ds.sel(lon=lon, lat=lat, method='nearest')
lon_attr = '%s_lon' % name
lat_attr = '%s_lat' % name
ds2.attrs[lon_attr] = ds2.lon.values.tolist()
ds2.attrs[lat_attr] = ds2.lat.values.tolist()
ds2 = ds2.rename({var : var_name}).drop(('lat', 'lon'))
ds_locs = xr.merge([ds_locs, ds2])
ds_locs.data_vars
def kelvin_to_celcius(t):
    """Convert a temperature `t` from Kelvin to degrees Celsius.

    NOTE(review): the name misspells "celsius"; kept unchanged for
    caller compatibility (it is passed to Dataset.apply below).
    """
    offset = 273.15
    return t - offset
def kelvin_to_fahrenheit(t):
    """Convert a temperature `t` from Kelvin to degrees Fahrenheit."""
    scaled = t * 9 / 5
    return scaled - 459.67
ds_locs_f = ds_locs.apply(kelvin_to_fahrenheit)
df_f = ds_locs_f.to_dataframe()
df_f.describe()
# readability please
plt.rcParams.update({'font.size': 16})
ax = df_f.plot(figsize=(18, 10), title="ERA5 Air Temperature at 2 Meters", grid=1)
ax.set(xlabel='Date', ylabel='Air Temperature (deg F)')
plt.show()
ax = df_f.plot.box(figsize=(18, 10))
ax.set(xlabel='Location', ylabel='Air Temperature (deg F)')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make a slippy map to get GeoJSON
Step2: Querying the Planet API.
Step3: Cleanup
Step4: Filtering our search using pandas.
Step5: Visualizing scene foot prints overlap with our AOI
Step6: Let's see what we got.
Step11: Product Activation and Downloading
Step12: Scenes ACTIVATE!
Step13: Download Scenes
Step18: Loading Images
Step19: Read Images and Use Matplotlib to show them.
Step20: Quick Histogram
Step21: Decomposing Channels
Step22: But all of these scenes are big, and we want downtown Portland
Step23: Awesome, Let's take a look at what we got.
Step24: Hrm... that's not right.
Step25: Let's take a look.... looks much better
Step26: Now let's pull it all together to do something interesting.
Step27: Finally let's process the scenes we just downloaded and make a gif.
|
<ASSISTANT_TASK:>
Python Code:
# See requirements.txt to set up your dev environment.
import sys
import os
import json
import scipy
import urllib
import datetime
import urllib3
import rasterio
import subprocess
import numpy as np
import pandas as pd
import seaborn as sns
from osgeo import gdal
from planet import api
from planet.api import filters
from traitlets import link
import rasterio.tools.mask as rio_mask
from shapely.geometry import mapping, shape
from IPython.display import display, Image, HTML
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
urllib3.disable_warnings()
from ipyleaflet import (
Map,
Marker,
TileLayer, ImageOverlay,
Polyline, Polygon, Rectangle, Circle, CircleMarker,
GeoJSON,
DrawControl
)
%matplotlib inline
# will pick up api_key via environment variable PL_API_KEY
# but can be specified using `api_key` named argument
api_keys = json.load(open("apikeys.json",'r'))
client = api.ClientV1(api_key=api_keys["PLANET_API_KEY"])
# Basemap Mosaic (v1 API)
mosaicsSeries = 'global_quarterly_2017q1_mosaic'
# Planet tile server base URL (Planet Explorer Mosaics Tiles)
mosaicsTilesURL_base = 'https://tiles0.planet.com/experimental/mosaics/planet-tiles/' + mosaicsSeries + '/gmap/{z}/{x}/{y}.png'
# Planet tile server url
mosaicsTilesURL = mosaicsTilesURL_base + '?api_key=' + api_keys["PLANET_API_KEY"]
# Map Settings
# Define colors
colors = {'blue': "#009da5"}
# Define initial map center lat/long
center = [45.5231, -122.6765]
# Define initial map zoom level
zoom = 11
# Set Map Tiles URL
planetMapTiles = TileLayer(url= mosaicsTilesURL)
# Create the map
m = Map(
center=center,
zoom=zoom,
default_tiles = planetMapTiles # Uncomment to use Planet.com basemap
)
# Define the draw tool type options
polygon = {'shapeOptions': {'color': colors['blue']}}
rectangle = {'shapeOptions': {'color': colors['blue']}}
# Create the draw controls
# @see https://github.com/ellisonbg/ipyleaflet/blob/master/ipyleaflet/leaflet.py#L293
dc = DrawControl(
polygon = polygon,
rectangle = rectangle
)
# Initialize an action counter variable
actionCount = 0
AOIs = {}
# Register the draw controls handler
def handle_draw(self, action, geo_json):
# Increment the action counter
global actionCount
actionCount += 1
# Remove the `style` property from the GeoJSON
geo_json['properties'] = {}
# Convert geo_json output to a string and prettify (indent & replace ' with ")
geojsonStr = json.dumps(geo_json, indent=2).replace("'", '"')
AOIs[actionCount] = json.loads(geojsonStr)
# Attach the draw handler to the draw controls `on_draw` event
dc.on_draw(handle_draw)
m.add_control(dc)
m
print AOIs[1]
myAOI = AOIs[1]["geometry"]
# build a query using the AOI and
# a cloud_cover filter that excludes 'cloud free' scenes
old = datetime.datetime(year=2013,month=1,day=1)
query = filters.and_filter(
filters.geom_filter(myAOI),
filters.range_filter('cloud_cover', lt=50),
filters.date_range('acquired', gt=old)
)
# build a request for only PlanetScope imagery
request = filters.build_search_request(
query, item_types=['PSScene3Band']
)
# if you don't have an API key configured, this will raise an exception
result = client.quick_search(request)
scenes = []
planet_map = {}
for item in result.items_iter(limit=500):
planet_map[item['id']]=item
props = item['properties']
props["id"] = item['id']
props["geometry"] = item["geometry"]
props["thumbnail"] = item["_links"]["thumbnail"]
scenes.append(props)
scenes = pd.DataFrame(data=scenes)
display(scenes)
print len(scenes)
# now let's clean up the datetime stuff
# make a shapely shape from our aoi
portland = shape(myAOI)
footprints = []
overlaps = []
# go through the geometry from our api call, convert to a shape and calculate overlap area.
# also save the shape for safe keeping
for footprint in scenes["geometry"].tolist():
s = shape(footprint)
footprints.append(s)
overlap = 100.0*(portland.intersection(s).area / portland.area)
overlaps.append(overlap)
# take our lists and add them back to our dataframe
scenes['overlap'] = pd.Series(overlaps, index=scenes.index)
scenes['footprint'] = pd.Series(footprints, index=scenes.index)
# now make sure pandas knows about our date/time columns.
scenes["acquired"] = pd.to_datetime(scenes["acquired"])
scenes["published"] = pd.to_datetime(scenes["published"])
scenes["updated"] = pd.to_datetime(scenes["updated"])
scenes.head()
# Now let's get it down to just good, recent, clear scenes
clear = scenes['cloud_cover']<0.1
good = scenes['quality_category']=="standard"
recent = scenes["acquired"] > datetime.date(year=2017,month=1,day=1)
partial_coverage = scenes["overlap"] > 30
good_scenes = scenes[(good&clear&recent&partial_coverage)]
display(good_scenes)
print len(good_scenes)
# Now let's get it down to just good, recent, clear scenes
clear = scenes['cloud_cover']<0.5
good = scenes['quality_category']=="standard"
all_time = scenes["acquired"] > datetime.date(year=2014,month=1,day=1)
full_coverage = scenes["overlap"] >= 60
all_scenes = scenes[(good&clear&all_time&full_coverage)]
display(all_scenes)
print len(all_scenes)
# first create a list of colors
colors = ["#ff0000","#00ff00","#0000ff","#ffff00","#ff00ff","#00ffff"]
# grab our scenes from the geometry/footprint geojson
footprints = good_scenes["geometry"].tolist()
# for each footprint/color combo
for footprint,color in zip(footprints,colors):
# create the leaflet object
feat = {'geometry':footprint,"properties":{
'style':{'color': color,'fillColor': color,'fillOpacity': 0.2,'weight': 1}},
'type':u"Feature"}
# convert to geojson
gjson = GeoJSON(data=feat)
# add it our map
m.add_layer(gjson)
# now we will draw our original AOI on top
feat = {'geometry':myAOI,"properties":{
'style':{'color': "#FFFFFF",'fillColor': "#FFFFFF",'fillOpacity': 0.5,'weight': 1}},
'type':u"Feature"}
gjson = GeoJSON(data=feat)
m.add_layer(gjson)
m
imgs = []
# loop through our thumbnails and add display them
for img in good_scenes["thumbnail"].tolist():
imgs.append(Image(url=img))
print img
display(*imgs)
def get_products(client, scene_id, asset_type='PSScene3Band'):
    """Ask the client to return the available products for a given
    scene and asset type.  Returns the product names (dict keys).

    BUG FIX: the docstring had lost its triple quotes, leaving bare
    prose in the function body (a syntax error); restored here.
    """
    out = client.get_assets_by_id(asset_type, scene_id)
    temp = out.get()
    return temp.keys()
def activate_product(client, scene_id, asset_type="PSScene3Band", product="analytic"):
    """Activate a product given a scene, an asset type, and a product.

    On success return a pair (API call result, activation object);
    return None when the requested product is not offered for the scene.
    BUG FIX: restored the triple quotes this docstring had lost (the
    bare prose lines were a syntax error).
    """
    temp = client.get_assets_by_id(asset_type, scene_id)
    products = temp.get()
    if product in products.keys():
        return client.activate(products[product]), products[product]
    else:
        return None
def download_and_save(client, product):
    """Given a client and a product activation object, download the
    asset, save the tiff file in the local directory, and return its
    file name.

    BUG FIX: restored the triple quotes this docstring had lost (the
    bare prose lines were a syntax error).
    """
    out = client.download(product)
    fp = out.get_body()
    fp.write()
    return fp.name
def scenes_are_active(scene_list):
    """Return True iff every scene activation object in `scene_list`
    reports status "active" (i.e. its asset is ready for download).

    Fixes: restored the lost triple quotes around the docstring and
    removed the dead `retVal` variable the original assigned but never
    used.
    """
    for scene in scene_list:
        if scene["status"] != "active":
            # Parenthesised single-argument print works in Python 2 and 3.
            print("{} is not ready.".format(scene))
            return False
    return True
to_get = good_scenes["id"].tolist()
activated = []
# for each scene to get
for scene in to_get:
# get the product
product_types = get_products(client,scene)
for p in product_types:
# if there is a visual product
if p == "visual": # p == "basic_analytic_dn"
print "Activating {0} for scene {1}".format(p,scene)
# activate the product
_,product = activate_product(client,scene,product=p)
activated.append(product)
tiff_files = []
asset_type = "_3B_Visual"
# check if our scenes have been activated
if True: #scenes_are_active(activated):
for to_download,name in zip(activated,to_get):
# create the product name
name = name + asset_type + ".tif"
# if the product exists locally
if( os.path.isfile(name) ):
# do nothing
print "We have scene {0} already, skipping...".format(name)
tiff_files.append(name)
elif to_download["status"] == "active":
# otherwise download the product
print "Downloading {0}....".format(name)
fname = download_and_save(client,to_download)
tiff_files.append(fname)
print "Download done."
else:
print "Could not download, still activating"
else:
print "Scenes aren't ready yet"
print tiff_files
def load_image4(filename):
    """Return a 4D (r, g, b, nir) numpy array with the data in the
    specified TIFF filename, or None when the file does not exist.

    BUG FIX: restored the triple quotes this docstring had lost (the
    bare prose lines were a syntax error).
    """
    path = os.path.abspath(os.path.join('./', filename))
    if os.path.exists(path):
        with rasterio.open(path) as src:
            b, g, r, nir = src.read()
            return np.dstack([r, g, b, nir])
def load_image3(filename):
    """Return a 3D numpy array with the first three bands of the
    specified TIFF filename, or None when the file does not exist.

    NOTE(review): the bands are stacked in the order read (b, g, r) —
    presumably matching the visual asset's band layout; confirm.
    BUG FIX: restored the triple quotes this docstring had lost.
    """
    path = os.path.abspath(os.path.join('./', filename))
    if os.path.exists(path):
        with rasterio.open(path) as src:
            b, g, r, mask = src.read()
            return np.dstack([b, g, r])
def get_mask(filename):
    """Return the mask band of the specified TIFF filename as a numpy
    array (via dstack), or None when the file does not exist.

    BUG FIX: restored the triple quotes this docstring had lost (the
    bare prose lines were a syntax error).
    """
    path = os.path.abspath(os.path.join('./', filename))
    if os.path.exists(path):
        with rasterio.open(path) as src:
            b, g, r, mask = src.read()
            return np.dstack([mask])
def rgbir_to_rgb(img_4band):
    """Convert an RGBIR image to RGB by dropping the fourth (near-IR)
    channel.

    BUG FIX: restored the triple quotes this docstring had lost (the
    bare prose line was a syntax error).
    """
    return img_4band[:, :, :3]
img_files = []
masks = []
# load the images and masks
for fname in tiff_files[0:2]:
img_files.append(load_image3(fname))
masks.append(get_mask(fname))
i = 0
# use matplotlib to display the map
for img,name in zip(img_files,tiff_files):
plt.figure(i,figsize=(18,36))
plt.imshow(img)
plt.title(name)
i+=1
import numpy.ma as ma
def plot_hist4(img_4band,title=""):
# Plot a four band histogram
r, g, b, nir = img_4band[:, :, 0], img_4band[:, :, 1], img_4band[:, :, 2], img_4band[:, :, 3]
for slice_, name, color in ((r,'r', 'red'),(g,'g', 'green'),(b,'b', 'blue'), (nir, 'nir', 'magenta')):
plt.hist(slice_.ravel(), bins=100,
range=[0,img_4band.max()],
label=name, color=color, histtype='step')
plt.title(title)
plt.legend()
def plot_hist3(img_3band,mask,title=""):
    # Plot per-channel histograms of a three-band image as step plots,
    # applying `mask` to each channel before binning.
    # NOTE(review): masked_array hides entries where `mask` is non-zero —
    # confirm this polarity matches the alpha-band convention of get_mask.
    r, g, b = img_3band[:, :, 0], img_3band[:, :, 1], img_3band[:, :, 2]
    r = ma.masked_array(r,mask=mask)
    g = ma.masked_array(g,mask=mask)
    b = ma.masked_array(b,mask=mask)
    for slice_, name, color in ((r,'r', 'red'),(g,'g', 'green'),(b,'b', 'blue')):
        plt.hist(slice_.ravel(), bins=25,
                 range=[0,img_3band.max()],
                 label=name, color=color, histtype='step')
    plt.title(title)
    plt.legend()
i = 0
for img,name,mask in zip(img_files,tiff_files,masks):
plt.figure(i,figsize=(9,18))
plot_hist3(img,mask=mask,title=name)
i+=1
def plot_bands4(img,title="",i=0):
fig = plt.figure(i)
fig.set_size_inches(24, 3)
r, g, b, nir = img[:, :, 0], img[:, :, 1], img[:, :, 2], img[:, :, 3]
fig.suptitle(title)
for i, (x, c) in enumerate(((r, 'r'), (g, 'g'), (b, 'b'), (nir, 'near-ir'))):
a = fig.add_subplot(1, 4, i+1)
a.set_title(c)
plt.imshow(x)
def plot_bands3(img,title="",i=0):
fig = plt.figure(i)
fig.set_size_inches(24, 5)
r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
fig.suptitle(title)
for i, (x, c) in enumerate(((r, 'r'), (g, 'g'), (b, 'b'))):
a = fig.add_subplot(1, 4, i+1)
a.set_title(c)
plt.imshow(x)
plot_bands3(img_files[0],title=tiff_files[0],i=0)
aoi_file ="portland.geojson"
# write our input AOI to a geojson file.
with open(aoi_file,"w") as f:
f.write(json.dumps(myAOI))
# create our full input and output names
clip_names = [os.path.abspath(tiff[:-4]+"_clip"+".tif") for tiff in tiff_files]
full_tif_files = [os.path.abspath("./"+tiff) for tiff in tiff_files]
for in_file,out_file in zip(tiff_files,clip_names):
commands = ["gdalwarp", # t
"-t_srs","EPSG:3857",
"-cutline",aoi_file,
"-crop_to_cutline",
"-tap",
"-tr", "3", "3"
"-overwrite"]
subprocess.call(["rm",out_file])
commands.append(in_file)
commands.append(out_file)
print " ".join(commands)
subprocess.call(commands)
clip_img_files = [load_image3(fname) for fname in clip_names]
i = 0
for img,name in zip(clip_img_files,clip_names):
plt.figure(i,figsize=(6,12))
plt.imshow(img)
plt.title(name)
i+=1
subprocess.call(["rm","merged.tif"])
commands = ["gdalwarp", # t
"-t_srs","EPSG:3857",
"-cutline",aoi_file,
"-crop_to_cutline",
"-tap",
"-tr", "3", "3"
"-overwrite"]
output_mosaic = "merged.tif"
for tiff in tiff_files[0:2]:
commands.append(tiff)
commands.append(output_mosaic)
print " ".join(commands)
subprocess.call(commands)
merged = load_image3("./merged.tif")
plt.figure(i,figsize=(6,12))
plt.imshow(merged)
plt.title("merged")
# Activate
to_get = all_scenes["id"].tolist()
activated = []
for scene in to_get:
product_types = get_products(client,scene)
for p in product_types:
if p == "visual": # p == "basic_analytic_dn"
print "Activating {0} for scene {1}".format(p,scene)
_,product = activate_product(client,scene,product=p)
activated.append(product)
# Download
tiff_files = []
asset_type = "_3B_Visual"
if True: #scenes_are_active(activated):
for to_download,name in zip(activated,to_get):
name = name + asset_type + ".tif"
if( os.path.isfile(name) ):
print "We have scene {0} already, skipping...".format(name)
tiff_files.append(name)
elif to_download["status"] == "active":
print "Downloading {0}....".format(name)
fname = download_and_save(client,to_download)
tiff_files.append(fname)
print "Download done."
else:
print "Could not download, still activating"
else:
print "Scenes aren't ready yet"
tiff_files = sorted(tiff_files)
# Create a list of tif file names.
for tiff in tiff_files:
clip_names.append(os.path.abspath(tiff[:-4]+"_clip"+".tif"))
full_tif_files = []
for tiff in tiff_files:
full_tif_files.append(os.path.abspath("./"+tiff))
# Run GDAL to crop our file down.
for in_file,out_file in zip(tiff_files,clip_names):
commands = ["gdalwarp", # t
"-t_srs","EPSG:3857",
"-cutline",aoi_file,
"-crop_to_cutline",
"-tap",
"-tr", "3", "3"
"-overwrite"]
subprocess.call(["rm",out_file])
commands.append(in_file)
commands.append(out_file)
print " ".join(commands)
subprocess.call(commands)
temp_names = []
i = 0
# use image magic convert to
for in_file in clip_names:
temp_name = "img{0}.gif".format(i)
command = ["convert", in_file, "-sample", "30x30%",temp_name]
temp_names.append(temp_name)
i += 1
subprocess.call(command)
magic = "portland.gif"
last_call = ["convert","-delay", "40","-loop","0", "img*.gif",magic]
subprocess.call(last_call)
print "done!"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We also prepared a simple tabulated file with the description of each GSM. It will be useful for calculating the LFC.
Step2: We can look in to this file
Step3: Now we select the GSMs that are controls.
Step4: Using GEOparse we can download experiments and look into the data
Step5: The GPL we are interested
Step6: And the columns that are available for exemplary GSM
Step7: We take the opportunity and check if everything is OK with the control samples. For this we just use simple histogram. To obtain table with each GSM as column, ID_REF as index and VALUE in each cell we use pivot_samples method from GSE object (we restrict the columns to the controls)
Step8: And we plot
Step9: Next we would like to filter out probes that are not expressed. The gene is expressed (in definition here) when its average log2 intensity in control samples is above 0.25 quantile. I.e. we filter out worst 25% genes.
Step10: We can see that the filtering succeeded. Now we can pivot all the samples and filter out probes that are not expressed
Step11: The most important thing is to calculate log fold changes. What we have to do is for each time-point identify control and transfected sample and subtract the VALUES (they are provided as log2 transformed already, we subtract transfection from the control). In the end we create new DataFrame with LFCs
Step12: Let's look at the data sorted by 24-hours time-point
Step13: We are interested in the gene expression changes upon transfection. Thus, we have to annotate each probe with ENTREZ gene ID, remove probes without ENTREZ or with multiple assignments. Although this strategy might not be optimal, after this we average the LFC for each gene over probes.
Step14: We can now look at the data
Step15: At that point our job is basically done. However, we might want to check if the experiments worked out at all. To do this we will use hsa-miR-124a-3p targets predicted by the MIRZA-G algorithm. The targets should be downregulated. First we read MIRZA-G results
Step16: We shall extract targets as a simple list of strings
Step17: As can be seen, there are a lot of targets (genes that possess a seed match in their 3'UTRs). We will use all of them. As a first step we will annotate each gene as a target or not and add this information as a column to the DataFrame
Step18: In the end we can plot the results
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import GEOparse
import pandas as pd
import pylab as pl
import seaborn as sns
pl.rcParams['figure.figsize'] = (14, 10)
pl.rcParams['ytick.labelsize'] = 12
pl.rcParams['xtick.labelsize'] = 11
pl.rcParams['axes.labelsize'] = 23
pl.rcParams['legend.fontsize'] = 20
sns.set_style('ticks')
c1, c2, c3, c4 = sns.color_palette("Set1", 4)
experiments = pd.read_table("GSE6207_experiments.tab")
experiments
controls = experiments[experiments.Type == 'control'].Experiment.tolist()
gse = GEOparse.get_GEO("GSE6207")
gse.gpls['GPL570'].columns
gse.gsms["GSM143385"].columns
pivoted_control_samples = gse.pivot_samples('VALUE')[controls]
pivoted_control_samples.head()
pivoted_control_samples.hist()
sns.despine(offset=10, trim=True)
pivoted_control_samples_average = pivoted_control_samples.median(axis=1)
print "Number of probes before filtering: ", len(pivoted_control_samples_average)
expression_threshold = pivoted_control_samples_average.quantile(0.25)
expressed_probes = pivoted_control_samples_average[pivoted_control_samples_average >= expression_threshold].index.tolist()
print "Number of probes above threshold: ", len(expressed_probes)
samples = gse.pivot_samples("VALUE").ix[expressed_probes]
lfc_results = {}
sequence = ['4 hours',
'8 hours',
'16 hours',
'24 hours',
'32 hours',
'72 hours',
'120 hours']
for time, group in experiments.groupby("Time"):
print time
control_name = group[group.Type == "control"].Experiment.iloc[0]
transfection_name = group[group.Type == "transfection"].Experiment.iloc[0]
lfc_results[time] = (samples[transfection_name] - samples[control_name]).to_dict()
lfc_results = pd.DataFrame(lfc_results)[sequence]
lfc_results.sort("24 hours").head()
# annotate with GPL
lfc_result_annotated = lfc_results.reset_index().merge(gse.gpls['GPL570'].table[["ID", "ENTREZ_GENE_ID"]],
left_on='index', right_on="ID").set_index('index')
del lfc_result_annotated["ID"]
# remove probes without ENTREZ
lfc_result_annotated = lfc_result_annotated.dropna(subset=["ENTREZ_GENE_ID"])
# remove probes with more than one gene assigned
lfc_result_annotated = lfc_result_annotated[~lfc_result_annotated.ENTREZ_GENE_ID.str.contains("///")]
# for each gene average LFC over probes
lfc_result_annotated = lfc_result_annotated.groupby("ENTREZ_GENE_ID").median()
lfc_result_annotated.sort("24 hours").head()
header = ["GeneID", "miRNA", "Total score without conservation", "Total score with conservation"]
miR124_targets = pd.read_table("seed-mirza-g_all_mirnas_per_gene_scores_miR_124a.tab", names=header)
miR124_targets.head()
miR124_targets_list = map(str, miR124_targets.GeneID.tolist())
print "Number of targets:", len(miR124_targets_list)
lfc_result_annotated["Is miR-124a target"] = [i in miR124_targets_list for i in lfc_result_annotated.index]
cols_to_plot = [i for i in lfc_result_annotated.columns if "hour" in i]
a = sns.pointplot(data=lfc_result_annotated[lfc_result_annotated["Is miR-124a target"]][cols_to_plot],
color=c2,
label="miR-124a target")
b = sns.pointplot(data=lfc_result_annotated[~lfc_result_annotated["Is miR-124a target"]][cols_to_plot],
color=c1,
label="No miR-124a target")
sns.despine()
pl.legend([pl.mpl.patches.Patch(color=c2), pl.mpl.patches.Patch(color=c1)],
["miR-124a target", "No miR-124a target"], frameon=True, loc='lower left')
pl.xlabel("Time after transfection")
pl.ylabel("Median log2 fold change")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Geometry
Step2: The idea is hence to forge a transmission line of variable characteristic impedance. In this example, the width of the metallization $W$ is varied along the transmission line length. The most common profiles are summarized in the figure below
Step3: Linear Taper
Step4: Exponential Taper
|
<ASSISTANT_TASK:>
Python Code:
import skrf as rf
import numpy as np
import matplotlib.pyplot as plt
from skrf.media import MLine
rf.stylely()
# Model Parameters
freq = rf.Frequency(1, 20, unit='GHz', npoints=191)
w1 = 20*rf.mil # conductor width [m]
w2 = 90*rf.mil # conductor width [m]
h = 20*rf.mil # dielectric thickness [m]
t = 0.7*rf.mil # conductor thickness [m]
rho = 1.724138e-8 # Copper resistivity [Ohm.m]
ep_r = 10 # dielectric relative permittivity
rough = 1e-6 # conductor RMS roughtness [m]
taper_length = 200*rf.mil # [m]
# Media definitions
microstrip_w1 = MLine(freq, w=w1, h=h, t=t, rho=rho, ep_r=ep_r, rough=rough)
microstrip_w2 = MLine(freq, w=w2, h=h, t=t, rho=rho, ep_r=ep_r, rough=rough)
# piece of transmission lines connected to the taper
line1 = microstrip_w1.line(d=50, unit='mil', name='feeder')
line2 = microstrip_w2.line(d=50, unit='mil', name='feeder')
# loading resistor
resistor = microstrip_w2.resistor(R=15)
fig, ax = plt.subplots()
z = np.linspace(0, taper_length)
ax.plot(z, (w2-w1)/taper_length*z + w1, lw=2, label='linear')
ax.plot(z, w1*np.exp(z/taper_length*(np.log(w2/w1))), lw=2, label='exponential')
ax.set_xticks([0, taper_length])
ax.set_xticklabels(['0', 'taper length'])
ax.set_yticks([w1, w2])
ax.set_yticklabels(['$W_1$', '$W_2$'])
ax.legend()
ax.set_title('Parameter profile along the taper length')
# create a 2-port Network
taper_linear = rf.taper.Linear(med=MLine, param='w', start=w1, stop=w2,
length=taper_length, n_sections=50,
med_kw={'frequency': freq, 'h': h, 't':t, 'ep_r': ep_r,
'rough': rough, 'rho': rho}).network
print(taper_linear)
# build the full circuit
# equivalent to ntwk = line1 ** taper_linear ** resistor ** line2 ** microstrip_w2.short()
ntwk_linear = rf.cascade_list([line1, taper_linear, line2, resistor, microstrip_w2.short()])
fig, ax = plt.subplots()
ax.plot(ntwk_linear.frequency.f_scaled, ntwk_linear.s_mag[:,0], lw=2, label='scikit-rf - Linear')
f_ref, s_mag_ref = np.loadtxt('ANSYS_Circuit_taper_linear_s_mag.csv', delimiter=',', skiprows=1, unpack=True)
ax.plot(f_ref, s_mag_ref, label='ANSYS Circuit - Linear Taper', lw=2, ls='--')
ax.set_xlabel('f [GHz]')
ax.set_ylabel('$|s_{11}|$')
ax.set_ylim(0, 0.6)
ax.set_xlim(1, 20)
ax.legend()
taper_exp = rf.taper.Exponential(med=MLine, param='w', start=w1, stop=w2,
length=taper_length, n_sections=50,
med_kw={'frequency': freq, 'h': h, 't':t, 'ep_r': ep_r,
'rough': rough, 'rho': rho}).network
ntwk_exp = line1 ** taper_exp ** line2 ** resistor ** microstrip_w2.short()
fig, ax = plt.subplots()
ax.plot(ntwk_exp.frequency.f_scaled, ntwk_exp.s_mag[:,0], lw=2, label='scikit-rf - Exponential')
f_ref, s_mag_ref = np.loadtxt('ANSYS_Circuit_taper_exponential_s_mag.csv', delimiter=',', skiprows=1, unpack=True)
ax.plot(f_ref, s_mag_ref, label='ANSYS Circuit - Exponential Taper', lw=2, ls='--')
ax.set_xlabel('f [GHz]')
ax.set_ylabel('$|s_{11}|$')
ax.set_ylim(0, 0.6)
ax.set_xlim(1, 20)
ax.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: pmf is the actual distribution; new_pmf is the biased
Step2: PmfOfWaitTime makes a meta-Pmf that maps from each uniform
Step3: low and high are the range of the uniform
Step4: MakeRange defines a set of possible values for wait time
Step5: The parameter, pmf_z, is the unbiased distribution of z.
Step6: So the distribution of x is also uniform from 0 to
Step7: pmf_z is the given distribution of gap times.
Step8: The parameters are the WaitTimeCalculator, the passenger
Step9: prior_x and posterior_x are the prior and posterior distributions of
Step10: As always, Likelihood takes a hypothesis and data, and
Step11: pmf_zb is the distribution of gaps between trains; pmf_x is the
Step12: The subtraction operator invokes Pmf.__sub__, which enumerates all
Step13: Figure [fig.redline3] shows the results. The prior distribution of
Step14: This Likelihood might look familiar; it is almost identical
Step15: __init__ builds hypos, which is a sequence of
Step16: Next I bias pmf_z to get the distribution of zb, draw a
Step17: Cdf.Sample is more efficient than Pmf.Sample,
Step18: Finally I unbias the distribution of zb to get the
Step19: This process is complicated, but all of the steps are operations we have
|
<ASSISTANT_TASK:>
Python Code:
def BiasPmf(pmf):
    """Return a size-biased copy of pmf.

    Each value's probability is reweighted by the value itself
    (length-biased sampling), then the result is renormalized.
    """
    biased = pmf.Copy()
    for value, _ in pmf.Items():
        biased.Mult(value, value)
    biased.Normalize()
    return biased
def PmfOfWaitTime(pmf_zb):
    """Distribution of a passenger's wait time y, given the biased gap pmf.

    For each gap, the wait is uniform on [0, gap]; the overall wait is the
    mixture of those uniforms weighted by the gap probabilities.
    """
    metapmf = thinkbayes.Pmf()
    for gap, prob in pmf_zb.Items():
        metapmf.Set(MakeUniformPmf(0, gap), prob)
    return thinkbayes.MakeMixture(metapmf)
def MakeUniformPmf(low, high):
    """Uniform Pmf over the discrete values MakeRange(low, high)."""
    values = MakeRange(low=low, high=high)
    pmf = thinkbayes.Pmf()
    for value in values:
        pmf.Set(value, 1)
    pmf.Normalize()
    return pmf
def MakeRange(low=10, high=1300, skip=10):
    """Return possible wait-time values (seconds) from low to high inclusive.

    Defaults added: this file calls MakeRange(low=60) without a high value,
    which would raise TypeError under the old signature.  The defaults match
    the reference implementation from Think Bayes (redline.py).
    """
    return range(low, high + skip, skip)
class WaitTimeCalculator(object):
    """Precomputes the distributions for the Red Line problem.

    pmf_z:  unbiased distribution of gaps between trains
    pmf_zb: gap distribution as seen by a random passenger (size-biased)
    pmf_y:  passenger wait time; pmf_x: elapsed time (same distribution
            by symmetry)
    """

    def __init__(self, pmf_z):
        self.pmf_z = pmf_z
        # fixed: was BiasPmf(pmf) -- `pmf` is undefined here (NameError);
        # bias the gap distribution that was passed in
        self.pmf_zb = BiasPmf(pmf_z)
        # fixed: PmfOfWaitTime is a module-level function, not a method
        # (self.PmfOfWaitTime would raise AttributeError)
        self.pmf_y = PmfOfWaitTime(self.pmf_zb)
        # by symmetry, elapsed time x has the same distribution as wait time y
        self.pmf_x = self.pmf_y
# NOTE(review): zp and y are not defined at this point -- this line appears
# to be an illustrative equation transcribed from the text, not runnable code.
x = zp - y
# Build the wait-time calculator from the (unbiased) gap distribution.
wtc = WaitTimeCalculator(pmf_z)
# Estimate elapsed time given 15 passengers on the platform and an arrival
# rate of 2 passengers per minute, expressed per second.
ete = ElapsedTimeEstimator(wtc,
                           lam=2.0/60,
                           num_passengers=15)
class ElapsedTimeEstimator(object):
    """Estimates elapsed time x since the last train and predicts wait y.

    wtc: WaitTimeCalculator providing pmf_x (prior) and pmf_zb
    lam: passenger arrival rate (passengers per second)
    num_passengers: number of passengers observed on the platform
    """

    def __init__(self, wtc, lam, num_passengers):
        # prior over elapsed time, taken from the wait-time calculator
        self.prior_x = Elapsed(wtc.pmf_x)
        # posterior after observing num_passengers arrivals at rate lam
        self.post_x = self.prior_x.Copy()
        self.post_x.Update((lam, num_passengers))
        # predictive distribution of the remaining wait, y = zb - x
        self.pmf_y = PredictWaitTime(wtc.pmf_zb, self.post_x)
class Elapsed(thinkbayes.Suite):
    """Suite of hypotheses for the elapsed time x since the last train."""

    def Likelihood(self, data, hypo):
        """Probability of seeing k waiting passengers given elapsed time hypo.

        Passenger arrivals are Poisson with rate lam, so the count over an
        interval of length hypo is Poisson(lam * hypo).
        """
        lam, k = data
        return thinkbayes.EvalPoissonPmf(k, lam * hypo)
def PredictWaitTime(pmf_zb, pmf_x):
    """Predictive distribution of the remaining wait y = zb - x."""
    wait = pmf_zb - pmf_x      # Pmf.__sub__ enumerates all pairs of values
    RemoveNegatives(wait)      # x cannot exceed zb; drop impossible mass
    return wait
# Remaining wait: y = zb - x, via Pmf.__sub__ (enumerates all value pairs).
# NOTE(review): pmf_zb / pmf_x are not in scope at this point in the snippet.
pmf_y = pmf_zb - pmf_x
def RemoveNegatives(pmf):
    """Remove negative values from pmf in place and renormalize.

    Used after pmf_zb - pmf_x: the elapsed time cannot exceed the gap, so
    negative waits are impossible and their probability mass is removed.
    """
    # Iterate over a snapshot: pmf.Remove mutates the underlying mapping,
    # and mutating while iterating a live dict view raises RuntimeError on
    # Python 3.
    for val in list(pmf.Values()):
        if val < 0:
            pmf.Remove(val)
    pmf.Normalize()
class ArrivalRate(thinkbayes.Suite):
    """Suite of hypotheses for the passenger arrival rate lam (per second)."""

    def Likelihood(self, data, hypo):
        """Probability of k arrivals during a wait of length y at rate hypo."""
        y, k = data
        return thinkbayes.EvalPoissonPmf(k, hypo * y)
class ArrivalRateEstimator(object):
    """Estimates the arrival rate lam from observed (k1, y, k2) records."""

    def __init__(self, passenger_data):
        # Hypotheses: arrival rates from 0 to 5 passengers per minute,
        # converted to per-second units.
        rates_per_minute = numpy.linspace(0, 5, 51)
        hypos = rates_per_minute / 60
        self.prior_lam = ArrivalRate(hypos)
        self.post_lam = self.prior_lam.Copy()
        # Update with each observation: k2 arrivals during wait time y.
        for _k1, y, k2 in passenger_data:
            self.post_lam.Update((y, k2))
import thinkbayes
n = 220
# Empirical CDF of observed gaps between trains (seconds).
# NOTE(review): gap_times is not defined in this snippet -- presumably loaded
# from the Red Line dataset earlier; confirm against the full notebook.
cdf_z = thinkbayes.MakeCdfFromList(gap_times)
sample_z = cdf_z.Sample(n)
pmf_z = thinkbayes.MakePmfFromList(sample_z)
# Size-bias the gap distribution (gaps as seen by a random passenger).
cdf_zp = BiasPmf(pmf_z).MakeCdf()
# Append a few long gaps to fatten the tail of the sample.
sample_zb = cdf_zp.Sample(n) + [1800, 2400, 3000]
# Smooth the sample with a kernel density estimate.
pdf_zb = thinkbayes.EstimatedPdf(sample_zb)
# assumes MakeRange provides a default for `high` -- TODO confirm signature
xs = MakeRange(low=60)
pmf_zb = pdf_zb.MakePmf(xs)
# NOTE(review): UnbiasPmf is not defined in this snippet.
pmf_z = UnbiasPmf(pmf_zb)
wtc = WaitTimeCalculator(pmf_z)
def ProbLongWait(num_passengers, minutes):
    """Return P(remaining wait > minutes), given passengers on the platform.

    Relies on the module-level wtc (WaitTimeCalculator) and lam (arrival
    rate, passengers per second) -- both assumed defined elsewhere.
    """
    ete = ElapsedTimeEstimator(wtc, lam, num_passengers)
    cdf_y = ete.pmf_y.MakeCdf()
    prob = 1 - cdf_y.Prob(minutes * 60)  # minutes -> seconds
    return prob  # fixed: the computed probability was discarded before
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: CSV to List
Step2: Dictionary to CSV
Step3: CSV to Dictionary
Step4: Pandas for CSV file operations
Step5: CSV to DataFrame
Step6: DataFrame to Excel
Step7: Excel to DataFrame
|
<ASSISTANT_TASK:>
Python Code:
# For reading/writing CSV files
import csv
# For listing system file folders
from subprocess import check_output
# Use with open to ensure file is closed when block ends
# The wb flag opens file for writing
with open('data/fileops/vehicles.csv', 'wb') as csv_file:
# Prepare csv writer
wtr = csv.writer(csv_file, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
# Write CSV header row
wtr.writerow(['type', 'wheels', 'speed', 'weight', 'invented'])
# Write CSV data rows
wtr.writerow(['Scooter', 2, 150, 109.78, 1817])
wtr.writerow(['Car', 4, 250, 1818.45, 1885])
wtr.writerow(['Plane', 10, 850, 270000, 1903])
# Check file created
print(check_output(["ls", "data/fileops"]).decode("utf8"))
# The rb flag opens file for reading
with open('data/fileops/vehicles.csv', 'rb') as csv_file:
rdr = csv.reader(csv_file, delimiter=',', quotechar='"')
for row in rdr:
print '\t'.join(row)
# Dictionary data structures can be used to represent rows
game1_scores = {'Game':'Quarter', 'Team A': 45, 'Team B': 90}
game2_scores = {'Game':'Semi', 'Team A': 80, 'Team B': 32}
game3_scores = {'Game':'Final', 'Team A': 70, 'Team B': 68}
headers = ['Game', 'Team A', 'Team B']
# Create CSV from dictionaries
with open('data/fileops/game-scores.csv', 'wb') as df:
dict_wtr = csv.DictWriter(df, fieldnames=headers)
dict_wtr.writeheader()
dict_wtr.writerow(game1_scores)
dict_wtr.writerow(game2_scores)
dict_wtr.writerow(game3_scores)
print(check_output(["ls", "data/fileops"]).decode("utf8"))
# Read CSV into dictionary data structure
with open('data/fileops/game-scores.csv', 'rb') as df:
dict_rdr = csv.DictReader(df)
for row in dict_rdr:
print('\t'.join([row['Game'], row['Team A'], row['Team B']]))
print('\t'.join(row.keys()))
import pandas as pd
# Create a DataFrame
df = pd.DataFrame({
'Name' : ['Josh', 'Eli', 'Ram', 'Bil'],
'Sales' : [34.32, 12.1, 4.77, 31.63],
'Region' : ['North', 'South', 'West', 'East'],
'Product' : ['PC', 'Phone', 'SW', 'Cloud']})
df
# DataFrame to CSV
df.to_csv('data/fileops/sales.csv', index=False)
print(check_output(["ls", "data/fileops"]).decode("utf8"))
# CSV to DataFrame
df2 = pd.read_csv('data/fileops/sales.csv')
df2
# DataFrame to XLSX Excel file
df.to_excel('data/fileops/sales.xlsx', index=False)
print(check_output(["ls", "data/fileops"]).decode("utf8"))
# Excel to DataFrame
df3 = pd.read_excel('data/fileops/sales.xlsx')
df3
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Data analysis
Step2: 3.2 Calculating the diffusion coefficient
Step3: Use the function <tt>curve_fit()</tt> from the module <tt>scipy.optimize</tt> to produce a fit for the linear regime and determine the diffusion coefficients for the different $\gamma$s.
Step4: Calculate the diffusion coefficient for all cases and plot them as a function of $\gamma$. What relation do you observe?
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import logging
import sys
import espressomd
import espressomd.accumulators
import espressomd.observables
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
# Constants
KT = 1.1
STEPS = 400000
# System setup
system = espressomd.System(box_l=[16] * 3)
system.time_step = 0.01
system.cell_system.skin = 0.4
system.part.add(pos=[0, 0, 0])
# Run for different friction coefficients
gammas = [1.0, 2.0, 4.0, 10.0]
tau_results = []
msd_results = []
for gamma in gammas:
system.auto_update_accumulators.clear()
system.thermostat.turn_off()
system.thermostat.set_langevin(kT=KT, gamma=gamma, seed=42)
logging.info("Equilibrating the system.")
system.integrator.run(1000)
logging.info("Equilibration finished.")
# Setup observable correlator
correlator = correlator_msd(0, STEPS)
system.auto_update_accumulators.add(correlator)
logging.info("Sampling started for gamma = {}.".format(gamma))
system.integrator.run(STEPS)
correlator.finalize()
tau_results.append(correlator.lag_times())
msd_results.append(np.sum(correlator.result().reshape([-1, 3]), axis=1))
logging.info("Sampling finished.")
%matplotlib notebook
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 22})
plt.figure(figsize=(10, 10))
plt.xlabel(r'$\tau$ [$\Delta t$]')
plt.ylabel(r'MSD [$\sigma^2$]')
for index, (tau, msd) in enumerate(zip(tau_results, msd_results)):
# We skip the first entry since it's zero by definition and cannot be displayed
# in a loglog plot. Furthermore, we only look at the first 100 entries due to
# the high variance for larger lag times.
plt.loglog(tau[1:100], msd[1:100], label=r'$\gamma=${:.1f}'.format(gammas[index]))
plt.legend()
plt.show()
import scipy.optimize
def quadratic(x, a, b, c):
    """Quadratic model used to fit the ballistic (short-time) MSD regime."""
    value = a * x**2 + b * x + c
    return value
# cutoffs for the ballistic regime (different for each gamma value)
tau_p_values = [14, 12, 10, 7]
plt.figure(figsize=(10, 10))
plt.xlabel(r'$\tau$ [$\Delta t$]')
plt.ylabel(r'MSD [$\sigma^2$]')
for index, (tau_p, tau, msd) in enumerate(zip(tau_p_values, tau_results, msd_results)):
(a, b, c), _ = scipy.optimize.curve_fit(quadratic, tau[:tau_p], msd[:tau_p])
x = np.linspace(tau[0], tau[max(tau_p_values) - 1], 50)
p = plt.plot(x, quadratic(x, a, b, c), '-')
plt.plot(tau[:max(tau_p_values)], msd[:max(tau_p_values)], 'o', color=p[0].get_color(),
label=r'$\gamma=${:.1f}'.format(gammas[index]))
plt.legend()
plt.show()
def linear(x, a, b):
    """Linear model used to fit the diffusive (long-time) MSD regime."""
    value = a * x + b
    return value
# cutoffs for the diffusive regime (different for each gamma value)
tau_f_values = [24, 22, 20, 17]
# cutoff for the data series (larger lag times have larger variance due to undersampling)
cutoff_limit = 90
diffusion_results = []
plt.figure(figsize=(10, 8))
plt.xlabel(r'$\tau$ [$\Delta t$]')
plt.ylabel(r'MSD [$\sigma^2$]')
for index, (tau_f, tau, msd) in enumerate(zip(tau_f_values, tau_results, msd_results)):
(a, b), _ = scipy.optimize.curve_fit(linear, tau[tau_f:cutoff_limit], msd[tau_f:cutoff_limit])
x = np.linspace(tau[tau_f], tau[cutoff_limit - 1], 50)
p = plt.plot(x, linear(x, a, b), '-')
plt.plot(tau[tau_f:cutoff_limit], msd[tau_f:cutoff_limit], 'o', color=p[0].get_color(),
label=r'$\gamma=${:.1f}'.format(gammas[index]))
diffusion_results.append(a / 6)
plt.legend()
plt.show()
plt.figure(figsize=(10, 8))
plt.xlabel(r'$\gamma$')
plt.ylabel('Diffusion coefficient [$\sigma^2/t$]')
x = np.linspace(0.9 * min(gammas), 1.1 * max(gammas), 50)
y = KT / x
plt.plot(x, y, '-', label=r'$k_BT\gamma^{-1}$')
plt.plot(gammas, diffusion_results, 'o', label='D')
plt.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For simplicity, we will use gsl_blas_dgemm as a stand-in for the "C/C++ library based on GSL." To make our life easier, we will wrap up the bindings to GSL and GSLBLAS into a single reflection dictionary. This is overkill for simple projects, but if we want access to all of GSL (as opposed to GSL and GSLBLAS separately, say), this approach is fine.
Step2: Next, we write a selection file that picks up all interesting bits from GSL. This file can be kept simple because of the proper naming conventions (it could have been simpler still if GSL were a C++ library, living in a single namespace). We then run the genreflex command to generate the dictionary file and compile it, linking in GSL and GSLBLAS. Finally, we're ready to load the dictionary reflection file into cppyy.
|
<ASSISTANT_TASK:>
Python Code:
import cppyy
# first, pull in all headers from the GSL installation directory (/usr/include on my system).
import glob, os
GSL_HOME = '/usr/include'
gsl_headers = [os.path.relpath(x, GSL_HOME) for x in glob.glob(GSL_HOME+'/gsl/*.h')]
%%file gsl_selection.xml
<lcgdict>
<struct pattern="gsl_*" />
<function pattern="gsl_*" />
<enum pattern="GSL*" />
<enum pattern="CBLAS*" />
</lcgdict>
# conventional name for generated output
rfldct = 'GSLDict'
if not os.path.exists('%s_rflx.cpp' % rfldct):
import subprocess, sys
# generate the reflection dictionary
try:
subprocess.check_output(
['genreflex', # utility installed by pip when installing cppyy
'-s', 'gsl_selection.xml', # selection file (see above)
'-o', '%s_rflx.cpp'%rfldct, # intermediate output file
'-I'+GSL_HOME]+ # include search path for GSL headers
gsl_headers) # headers themselves
except subprocess.CalledProcessError as e:
print("genreflex failed (%d):" % e.returncode, e.output)
else:
print("genreflex done")
if not os.path.exists('%s.so' % rfldct):
# get command line arguments for compiler from cling
try:
clingflags = subprocess.check_output(
['cling-config', # utility installed by pip when installing cppyy
'--cppflags'])
except subprocess.CalledProcessError as e:
print('cling-config failed (%d):' % e.returncode, e.output)
raise
else:
print('cling-config done')
# compile generated file
try:
subprocess.check_output(
['g++', # C++ compiler
'-fPIC', # require position independent code
'-shared', # generate shared library
'-o', '%s.so'%rfldct, # output file
'-I'+GSL_HOME, # include search path for GSL headers
'%s_rflx.cpp'%rfldct]+ # intermediate file to compile
clingflags.split()+ # extra flags provided by cling
['-lgsl', '-lgslcblas']) # link in GSL and GSLBLAS
except subprocess.CalledProcessError as e:
print('compilation failed (%d):' % e.returncode, e.output)
else:
print('compilation done')
# load the generated dictionary
cppyy.load_reflection_info(rfldct)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we generate Y. Remember that Y is supposed to have a deep economic link to X, so the price of Y should vary pretty similarly. We model this by taking X, shifting it up and adding some random noise drawn from a normal distribution.
Step2: Def
Step3: As a sanity check let's run a cointegration test on the pair of fake securities X and Y.
Step4: Here's a method I wrote to look through a list of securities and test for cointegration between all pairs. It returns a cointegration test score matrix, a p-value matrix, and any pairs for which the p-value was less than 0.05.
Step5: Looking for Cointegrated Pairs of Alternative Energy Securities
Step6: Now we'll run our method on the list and see if any pairs are cointegrated.
Step7: Looks like 'ABGB' and 'FSLR' are cointegrated. Let's take a look at the prices to make sure there's nothing weird going on.
Step8: Def
Step9: We can use the moving averages to compute the z-score of the difference at each given time. This will tell us how extreme the difference is and whether it's a good idea to enter a position at this time. Let's take a look at the z-score now.
Step10: The z-score doesn't mean much out of context, let's plot it next to the prices to get an idea of what it looks like. We'll take the negative of the z-score because the differences were all negative and that's kinda confusing.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
X_returns = np.random.normal(0, 1, 100) # Generate the daily returns
# sum them and shift all the prices up into a reasonable range
X = pd.Series(np.cumsum(X_returns), name='X') + 50
X.plot()
some_noise = np.random.normal(0, 1, 100)
Y = X + 5 + some_noise
Y.name = 'Y'
pd.concat([X, Y], axis=1).plot()
import statsmodels
from statsmodels.tsa.stattools import coint
score, pvalue, _ = coint(X, Y)
print pvalue
print pvalue < 0.05
def find_cointegrated_pairs(securities_panel):
    """Run pairwise Engle-Granger cointegration tests over all securities.

    Returns (score_matrix, pvalue_matrix, pairs): upper-triangular test
    statistics and p-values for every unordered pair, plus the list of
    symbol pairs whose p-value is below 0.05.
    """
    n = len(securities_panel.minor_axis)
    score_matrix = np.zeros((n, n))
    # start from ones so untested (diagonal / lower-triangle) cells never
    # look statistically significant
    pvalue_matrix = np.ones((n, n))
    pairs = []
    # fixed: removed unused local `keys = securities_panel.keys`
    for i in range(n):
        for j in range(i + 1, n):
            S1 = securities_panel.minor_xs(securities_panel.minor_axis[i])
            S2 = securities_panel.minor_xs(securities_panel.minor_axis[j])
            score, pvalue, _ = coint(S1, S2)
            score_matrix[i, j] = score
            pvalue_matrix[i, j] = pvalue
            if pvalue < 0.05:
                pairs.append((securities_panel.minor_axis[i],
                              securities_panel.minor_axis[j]))
    return score_matrix, pvalue_matrix, pairs
symbol_list = ['ABGB', 'ASTI', 'CSUN', 'DQ', 'FSLR', 'HK', 'AAPL', 'MSFT']
securities_panel = get_pricing(symbol_list, fields=['price'], start_date='2014-01-01', end_date='2015-01-01')
securities_panel.minor_axis = map(lambda x: x.symbol, securities_panel.minor_axis)
# series_list = load_prices(symbol_list)
scores, pvalues, pairs = find_cointegrated_pairs(securities_panel)
import seaborn
seaborn.heatmap(pvalues, xticklabels=symbol_list, yticklabels=symbol_list, mask = (pvalues >= 0.5))
print pairs
S = securities_panel.loc['price', :, ['ABGB', 'FSLR']]
S.plot()
# Get the difference
difference = securities_panel.loc['price', :, 'ABGB'] - securities_panel.loc['price', :, 'FSLR']
difference.name = 'diff'
# Get the 5 day moving average of the difference
diff_mavg5 = pd.rolling_mean(difference, window=5)
diff_mavg5.name = 'diff 5d mavg'
# Get the 60 day moving average
diff_mavg60 = pd.rolling_mean(difference, window=60)
diff_mavg60.name = 'diff 60d mavg'
pd.concat([diff_mavg60, diff_mavg5, difference], axis=1).plot()
# Take a rolling 60 day standard deviation
std_60 = pd.rolling_std(difference, window=60)
std_60.name = 'std 60d'
# Compute the z score at each time
zscore = (diff_mavg5 - diff_mavg60)/std_60
zscore.name = 'z-score'
zscore.plot()
# Plot the prices scaled down along with the negative z-score
pd.concat([S/10, -zscore], axis=1).plot()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First download data from the city of Berkeley's API. You can use Socrata's $limit parameter to specify how many rows to grab (otherwise the default is 1,000 rows of data)
Step2: Next, turn the json data into a dataframe and clean it up a bit
Step3: Finally, convert each row in the dataframe to a geojson-formatted feature and save the result as a file. The format is pretty simple and you can see it here
Step4: In nteract, we can display geojson directly with the built-in leaflet renderer.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd, requests, json
# API endpoint for city of Berkeley's 311 calls
endpoint_url = 'https://data.cityofberkeley.info/resource/k489-uv4i.json?$limit=20'
# fetch the URL and load the data
response = requests.get(endpoint_url)
data = response.json()
# turn the json data into a dataframe and see how many rows and what columns we have
df = pd.DataFrame(data)
print('We have {} rows'.format(len(df)))
str(df.columns.tolist())
# convert lat-long to floats and change address from ALL CAPS to regular capitalization
df['latitude'] = df['latitude'].astype(float)
df['longitude'] = df['longitude'].astype(float)
df['street_address'] = df['street_address'].str.title()
# we don't need all those columns - only keep useful ones
cols = ['issue_description', 'issue_type', 'latitude', 'longitude', 'street_address', 'ticket_status']
df_subset = df[cols]
# drop any rows that lack lat/long data
df_geo = df_subset.dropna(subset=['latitude', 'longitude'], axis=0, inplace=False)
print('We have {} geotagged rows'.format(len(df_geo)))
df_geo.tail()
# what is the distribution of issue types?
df_geo['issue_type'].value_counts()
def df_to_geojson(df, properties, lat='latitude', lon='longitude'):
    """Convert a DataFrame of point records into a GeoJSON FeatureCollection.

    df: DataFrame with one point per row
    properties: column names copied into each feature's properties
    lat, lon: names of the latitude / longitude columns
    """
    features = []
    for _, record in df.iterrows():
        # GeoJSON orders coordinates as [longitude, latitude]
        geometry = {'type': 'Point',
                    'coordinates': [record[lon], record[lat]]}
        props = {name: record[name] for name in properties}
        features.append({'type': 'Feature',
                         'properties': props,
                         'geometry': geometry})
    return {'type': 'FeatureCollection', 'features': features}
cols = ['street_address', 'issue_description', 'issue_type', 'ticket_status']
geojson = df_to_geojson(df_geo, cols)
import IPython
IPython.display.display({'application/geo+json': geojson}, raw=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
# 3x2 example array
a = np.array([[0, 1], [2, 1], [4, 8]])
# Boolean mask marking each row's minimum element(s); keepdims=1 keeps the
# row minima as shape (3, 1) so they broadcast against `a` in the comparison.
mask = (a.min(axis=1,keepdims=1) == a)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Restart the Kernel
Step2: Before you begin
Step3: Region
Step4: Timestamp
Step5: Authenticate your GCP account
Step6: Create a Cloud Storage bucket
Step7: Only if your bucket doesn't already exist
Step8: Finally, validate access to your Cloud Storage bucket by examining its contents
Step9: Set up variables
Step10: AutoML constants
Step11: Clients
Step12: Prepare a trainer script
Step13: Task.py contents
Step14: Store training script on your Cloud Storage bucket
Step15: Train a model
Step16: Example output
Step17: Response
Step18: Example output
Step19: projects.jobs.get
Step20: Response
Step21: Example output
Step22: Serving function for trained model (image data)
Step23: Example output
Step24: Example output
Step25: Example output
Step26: Response
Step27: Example output
Step28: projects.jobs.get
Step29: Response
Step30: Example output
Step31: Example output
Step32: Example output
Step33: Response
Step34: Example output
Step35: projects.models.versions.create
Step36: Example output
Step37: Response
Step38: Example output
Step39: Prepare input for online prediction
Step40: projects.predict
Step41: Example output
Step42: Response
Step43: Example output
Step44: Response
Step45: Example output
|
<ASSISTANT_TASK:>
Python Code:
! pip3 install google-cloud-storage
import os
if not os.getenv("AUTORUN"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your Google Cloud account. This provides access
# to your Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on AutoML, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this tutorial in a notebook locally, replace the string
# below with the path to your service account key and run this cell to
# authenticate your Google Cloud account.
else:
%env GOOGLE_APPLICATION_CREDENTIALS your_path_to_credentials.json
# Log in to your account on Google Cloud
! gcloud auth login
BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "[your-bucket-name]":
BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION gs://$BUCKET_NAME
! gsutil ls -al gs://$BUCKET_NAME
import json
import time
from google.protobuf.json_format import MessageToJson
from google.protobuf.struct_pb2 import Value
from googleapiclient import discovery, errors
# AutoM location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID
cloudml = discovery.build("ml", "v1")
! rm -rf cifar
! mkdir cifar
! touch cifar/README.md
setup_cfg = "[egg_info]\n\
tag_build =\n\
tag_date = 0"
! echo "$setup_cfg" > cifar/setup.cfg
setup_py = "import setuptools\n\
# Requires TensorFlow Datasets\n\
setuptools.setup(\n\
install_requires=[\n\
'tensorflow_datasets==1.3.0',\n\
],\n\
packages=setuptools.find_packages())"
! echo "$setup_py" > cifar/setup.py
pkg_info = "Metadata-Version: 1.0\n\
Name: Custom Training CIFAR-10\n\
Version: 0.0.0\n\
Summary: Demonstration training script\n\
Home-page: www.google.com\n\
Author: Google\n\
Author-email: aferlitsch@google.com\n\
License: Public\n\
Description: Demo\n\
Platform: Vertex AI"
! echo "$pkg_info" > cifar/PKG-INFO
! mkdir cifar/trainer
! touch cifar/trainer/__init__.py
%%writefile cifar/trainer/task.py
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default='/tmp/saved_model', type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.01, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=10, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=200, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
print('DEVICES', device_lib.list_local_devices())
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def make_datasets_unbatched():
    """Load the CIFAR-10 training split as an unbatched, repeating tf.data pipeline."""
    def scale(image, label):
        # Normalize pixel values from [0, 255] to [0, 1].
        return tf.cast(image, tf.float32) / 255.0, label
    datasets, info = tfds.load(name='cifar10',
                               with_info=True,
                               as_supervised=True)
    train_split = datasets['train']
    return train_split.map(scale).cache().shuffle(BUFFER_SIZE).repeat()
def build_and_compile_cnn_model():
    """Build and compile a small CNN classifier for 32x32x3 CIFAR-10 images."""
    layers = [
        tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(32, 3, activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(10, activation='softmax'),
    ]
    model = tf.keras.Sequential(layers)
    # Learning rate comes from the --lr command-line flag parsed above.
    model.compile(
        loss=tf.keras.losses.sparse_categorical_crossentropy,
        optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr),
        metrics=['accuracy'])
    return model
NUM_WORKERS = strategy.num_replicas_in_sync
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
train_dataset = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE)
with strategy.scope():
model = build_and_compile_cnn_model()
model.fit(x=train_dataset, epochs=args.epochs, steps_per_epoch=args.steps)
model.save(args.model_dir)
! rm -f cifar.tar cifar.tar.gz
! tar cvf cifar.tar cifar
! gzip cifar.tar
! gsutil cp cifar.tar.gz gs://$BUCKET_NAME/trainer_cifar.tar.gz
JOB_NAME = "custom_job_TF_" + TIMESTAMP
TRAINING_INPUTS = {
"scaleTier": "CUSTOM",
"masterType": "n1-standard-4",
"masterConfig": {"acceleratorConfig": {"count": "1", "type": "NVIDIA_TESLA_K80"}},
"packageUris": ["gs://" + BUCKET_NAME + "/trainer_cifar.tar.gz"],
"pythonModule": "trainer.task",
"args": [
"--model-dir=" + "gs://{}/{}".format(BUCKET_NAME, JOB_NAME),
"--epochs=" + str(20),
"--steps=" + str(100),
"--distribute=" + "single",
],
"region": REGION,
"runtimeVersion": "2.1",
"pythonVersion": "3.7",
}
body = {"jobId": JOB_NAME, "trainingInput": TRAINING_INPUTS}
request = cloudml.projects().jobs().create(parent=PARENT)
request.body = json.loads(json.dumps(body, indent=2))
print(json.dumps(json.loads(request.to_json()), indent=2))
request = cloudml.projects().jobs().create(parent=PARENT, body=body)
response = request.execute()
print(json.dumps(response, indent=2))
# The full unique ID for the custom training job
custom_training_id = f'{PARENT}/jobs/{response["jobId"]}'
# The short numeric ID for the custom training job
custom_training_short_id = response["jobId"]
print(custom_training_id)
request = cloudml.projects().jobs().get(name=custom_training_id)
response = request.execute()
print(json.dumps(response, indent=2))
while True:
response = cloudml.projects().jobs().get(name=custom_training_id).execute()
if response["state"] != "SUCCEEDED":
print("Training job has not completed:", response["state"])
if response["state"] == "FAILED":
break
else:
break
time.sleep(20)
# model artifact output directory on Google Cloud Storage
model_artifact_dir = response["trainingInput"]["args"][0].split("=")[-1]
print("artifact location " + model_artifact_dir)
import tensorflow as tf
model = tf.keras.models.load_model(model_artifact_dir)
CONCRETE_INPUT = "numpy_inputs"
def _preprocess(bytes_input):
    """Decode one JPEG byte string to a float32 [0, 1] image resized to 32x32."""
    decoded = tf.io.decode_jpeg(bytes_input, channels=3)
    # convert_image_dtype already rescales uint8 [0, 255] to float32 [0, 1];
    # the original's extra `/ 255.0` double-scaled inputs to [0, 1/255],
    # inconsistent with the training pipeline, which fed the model [0, 1] images.
    scaled = tf.image.convert_image_dtype(decoded, tf.float32)
    resized = tf.image.resize(scaled, size=(32, 32))
    return resized
# Serving-side preprocessing: map a batch of raw JPEG byte strings to the
# dict of dense tensors the model's concrete function expects.
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def preprocess_fn(bytes_inputs):
    # Decode/convert/resize each JPEG independently via _preprocess.
    decoded_images = tf.map_fn(
        _preprocess, bytes_inputs, dtype=tf.float32, back_prop=False
    )
    # The dict key must match the name of the model's input tensor
    # (CONCRETE_INPUT, used when building m_call below).
    return {
        CONCRETE_INPUT: decoded_images
    } # User needs to make sure the key matches model's input
m_call = tf.function(model.call).get_concrete_function(
[tf.TensorSpec(shape=[None, 32, 32, 3], dtype=tf.float32, name=CONCRETE_INPUT)]
)
# Model serving signature: accepts raw JPEG bytes plus a pass-through instance
# key, returns class probabilities under 'prediction'.
@tf.function(
    input_signature=[tf.TensorSpec([None], tf.string), tf.TensorSpec([None], tf.string)]
)
def serving_fn(bytes_inputs, key):
    # Decode/resize the raw bytes, then call the model's concrete function.
    images = preprocess_fn(bytes_inputs)
    prob = m_call(**images)
    # Echo the key so batch-prediction outputs can be matched back to inputs.
    return {"prediction": prob, "key": key}
tf.saved_model.save(
model,
model_artifact_dir,
signatures={
"serving_default": serving_fn,
},
)
loaded = tf.saved_model.load(model_artifact_dir)
tensors_specs = list(loaded.signatures["serving_default"].structured_input_signature)
print("Tensors specs:", tensors_specs)
input_name = [v for k, v in tensors_specs[1].items() if k != "key"][0].name
print("Bytes input tensor name:", input_name)
import base64
import json
import cv2
import numpy as np
import tensorflow as tf
(_, _), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
test_image_1, test_label_1 = x_test[0], y_test[0]
test_image_2, test_label_2 = x_test[1], y_test[1]
cv2.imwrite("tmp1.jpg", (test_image_1 * 255).astype(np.uint8))
cv2.imwrite("tmp2.jpg", (test_image_2 * 255).astype(np.uint8))
gcs_input_uri = "gs://" + BUCKET_NAME + "/" + "test.json"
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
for img in ["tmp1.jpg", "tmp2.jpg"]:
bytes = tf.io.read_file(img)
b64str = base64.b64encode(bytes.numpy()).decode("utf-8")
f.write(json.dumps({"key": img, input_name: {"b64": b64str}}) + "\n")
! gsutil cat $gcs_input_uri
body = {
"jobId": "custom_job_TF_pred_" + TIMESTAMP,
"prediction_input": {
"input_paths": gcs_input_uri,
"output_path": "gs://" + f"{BUCKET_NAME}/batch_output/",
"data_format": "JSON",
"runtime_version": "2.1",
"uri": model_artifact_dir,
"region": "us-central1",
},
}
request = (
cloudml.projects()
.jobs()
.create(
parent=PARENT,
)
)
request.body = json.loads(json.dumps(body, indent=2))
print(json.dumps(json.loads(request.to_json()), indent=2))
request = cloudml.projects().jobs().create(parent=PARENT, body=body)
response = request.execute()
print(json.dumps(response, indent=2))
# The full unique ID for the batch prediction job
batch_job_id = PARENT + "/jobs/" + response["jobId"]
print(batch_job_id)
request = cloudml.projects().jobs().get(name=batch_job_id)
response = request.execute()
print(json.dumps(response, indent=2))
while True:
response = request = cloudml.projects().jobs().get(name=batch_job_id).execute()
if response["state"] != "SUCCEEDED":
print("The job has not completed:", response["state"])
if response["state"] == "FAILED":
break
else:
folder = response["predictionInput"]["outputPath"][:-1]
! gsutil ls $folder/prediction*
! gsutil cat $folder/prediction*
break
time.sleep(60)
request = cloudml.projects().models().create(parent=PARENT)
request.body = json.loads(json.dumps({"name": "custom_job_TF_" + TIMESTAMP}, indent=2))
print(json.dumps(json.loads(request.to_json()), indent=2))
request = (
cloudml.projects()
.models()
.create(parent=PARENT, body={"name": "custom_job_TF_" + TIMESTAMP})
)
response = request.execute()
print(json.dumps(response, indent=2))
# The full unique ID for the training pipeline
model_id = response["name"]
# The short numeric ID for the training pipeline
model_short_name = model_id.split("/")[-1]
print(model_id)
version = {
"name": "custom_job_TF_" + TIMESTAMP,
"deploymentUri": model_artifact_dir,
"runtimeVersion": "2.1",
"framework": "TENSORFLOW",
"pythonVersion": "3.7",
"machineType": "mls1-c1-m2",
}
request = (
cloudml.projects()
.models()
.versions()
.create(
parent=model_id,
)
)
request.body = json.loads(json.dumps(version, indent=2))
print(json.dumps(json.loads(request.to_json()), indent=2))
request = cloudml.projects().models().versions().create(parent=model_id, body=version)
response = request.execute()
print(json.dumps(response, indent=2))
# The full unique ID for the model version
model_version_name = response["metadata"]["version"]["name"]
print(model_version_name)
while True:
response = (
cloudml.projects().models().versions().get(name=model_version_name).execute()
)
if response["state"] == "READY":
print("Model version created.")
break
time.sleep(60)
import base64
import json
import cv2
import tensorflow as tf
(_, _), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
test_image_1, test_label_1 = x_test[0], y_test[0]
test_image_2, test_label_2 = x_test[1], y_test[1]
cv2.imwrite("tmp1.jpg", (test_image_1 * 255).astype(np.uint8))
cv2.imwrite("tmp2.jpg", (test_image_2 * 255).astype(np.uint8))
instances_list = []
for img in ["tmp1.jpg", "tmp2.jpg"]:
bytes = tf.io.read_file(img)
b64str = base64.b64encode(bytes.numpy()).decode("utf-8")
instances_list.append({"key": img, input_name: {"b64": b64str}})
request = cloudml.projects().predict(name=model_version_name)
request.body = json.loads(json.dumps({"instances": instances_list}, indent=2))
print(json.dumps(json.loads(request.to_json()), indent=2))
request = cloudml.projects().predict(
name=model_version_name, body={"instances": instances_list}
)
response = request.execute()
print(json.dumps(response, indent=2))
request = cloudml.projects().models().versions().delete(name=model_version_name)
response = request.execute()
print(json.dumps(response, indent=2))
delete_model = True
delete_bucket = True
# Delete the model using the Vertex AI fully qualified identifier for the model
try:
if delete_model:
cloudml.projects().models().delete(name=model_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r gs://$BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some 'theory'
Step2: And now applying this on some real data
Step3: <div class="alert alert-success">
Step4: <div class="alert alert-success">
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
try:
import seaborn
except ImportError:
pass
pd.options.display.max_rows = 10
df = pd.DataFrame({'key':['A','B','C','A','B','C','A','B','C'],
'data': [0, 5, 10, 5, 10, 15, 10, 15, 20]})
df
df.groupby('key').aggregate('sum') # np.sum
df.groupby('key').sum()
cast = pd.read_csv('data/cast.csv')
cast.head()
titles = pd.read_csv('data/titles.csv')
titles.head()
df
def normalize(group):
    """Z-score *group*: subtract its mean, then divide by its standard deviation."""
    centered = group - group.mean()
    return centered / group.std()
df.groupby('key').transform(normalize)
titles.title.value_counts().head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load experimental data
Step2: Define subset of sessions to score
Step3: Parallel scoring
Step4: Save results to disk
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import os
import pandas as pd
import warnings
import nelpy as nel
warnings.filterwarnings("ignore")
datadirs = ['/Users/ckemere/Development/Data/Buzsaki']
fileroot = next( (dir for dir in datadirs if os.path.isdir(dir)), None)
# conda install pandas=0.19.2
if fileroot is None:
raise FileNotFoundError('datadir not found')
load_from_nel = True
# load from nel file:
if load_from_nel:
jar = nel.load_pkl(os.path.join(fileroot,'gor01vvp01_processed_speed.nel'))
exp_data = jar.exp_data
aux_data = jar.aux_data
del jar
with pd.HDFStore(os.path.join(fileroot,'DibaMetadata.h5')) as store:
df = store.get('Session_Metadata')
df2 = store.get('Subset_Metadata')
# restrict sessions to explore to a smaller subset
min_n_placecells = 16
min_n_PBEs = 27 # 27 total events ==> minimum 21 events in training set
df2_subset = df2[(df2.n_PBEs >= min_n_PBEs) & (df2.n_placecells >= min_n_placecells)]
sessions = df2_subset['time'].values.tolist()
segments = df2_subset['segment'].values.tolist()
print('Evaluating subset of {} sessions'.format(len(sessions)))
df2_subset.sort_values(by=['n_PBEs', 'n_placecells'], ascending=[0,0])
n_jobs = 20 # set this equal to number of cores
n_shuffles = 100 # 5000
n_samples = 35000 # 35000
w=3 # single sided bandwidth (0 means only include bin who's center is under line, 3 means a total of 7 bins)
import matplotlib.pyplot as plt
%matplotlib inline
# Parallelize by EVENT
import dask
import distributed.joblib
from joblib import Parallel, delayed
from joblib import parallel_backend
# A function that can be called to do work:
def work_events(arg):
    """Parallel worker: score one PBE against its tuning curve and tag the result."""
    # arg is a 5-tuple produced by the parallel_events list below.
    session, segment, event_idx, bst, tc = arg
    # w, n_shuffles and n_samples are notebook-level configuration globals.
    result = nel.analysis.replay.score_Davidson_final_bst_fast(
        bst=bst,
        tuningcurve=tc,
        w=w,
        n_shuffles=n_shuffles,
        n_samples=n_samples)
    scores, shuffled_scores, percentiles = result
    return (session, segment, event_idx, scores, shuffled_scores, percentiles)
# List of instances to pass to work():
# unroll all events:
parallel_events = []
for session, segment in zip(sessions, segments):
for nn in range(aux_data[session][segment]['PBEs'].n_epochs):
parallel_events.append((session, segment, nn, aux_data[session][segment]['PBEs'][nn],
aux_data[session][segment]['tc']))
#parallel_results = list(map(work_events, parallel_events))
with parallel_backend('dask.distributed', scheduler_host='35.184.42.12:8786'):
# Anything returned by work() can be stored:
parallel_results = Parallel(n_jobs=n_jobs, verbose=1)(map(delayed(work_events), parallel_events))
# standardize parallel results
bdries_ = [aux_data[session][segment]['PBEs'].n_epochs for session, segment in zip(sessions, segments) ]
bdries = np.cumsum(np.insert(bdries_,0,0))
bdries
sessions_ = np.array([result[0] for result in parallel_results])
segments_ = np.array([result[1] for result in parallel_results])
idx = [result[2] for result in parallel_results]
scores_bayes_evt = np.array([float(result[3]) for result in parallel_results])
scores_bayes_shuffled_evt = np.array([result[4].squeeze() for result in parallel_results])
scores_bayes_percentile_evt = np.array([float(result[5]) for result in parallel_results])
results = {}
for nn in range(len(bdries)-1):
session = np.unique(sessions_[bdries[nn]:bdries[nn+1]])
if len(session) > 1:
raise ValueError("parallel results in different format / order than expected!")
session = session[0]
segment = np.unique(segments_[bdries[nn]:bdries[nn+1]])
if len(segment) > 1:
raise ValueError("parallel results in different format / order than expected!")
segment = segment[0]
try:
results[session][segment]['scores_bayes'] = scores_bayes_evt[bdries[nn]:bdries[nn+1]]
except KeyError:
try:
results[session][segment] = dict()
results[session][segment]['scores_bayes'] = scores_bayes_evt[bdries[nn]:bdries[nn+1]]
except KeyError:
results[session] = dict()
results[session][segment] = dict()
results[session][segment]['scores_bayes'] = scores_bayes_evt[bdries[nn]:bdries[nn+1]]
results[session][segment]['scores_bayes_shuffled'] = scores_bayes_shuffled_evt[bdries[nn]:bdries[nn+1]]
results[session][segment]['scores_bayes_percentile'] = scores_bayes_percentile_evt[bdries[nn]:bdries[nn+1]]
print('done packing results')
jar = nel.ResultsContainer(results=results, description='gor01 and vvp01 speed restricted results for best 20 candidate sessions')
jar.save_pkl('score_bayes_all_sessions.nel')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Documents have a few more features than plaintext files. They have the following objects in this module
Step2: We then use the Document() function to open the word document as a document object.
Step3: We can list the paragraphs within this document object.
Step4: Every paragraph object includes a .text variable containing the string of the document.
Step5: As you can see, these paragraph objects match the text in the document.
Step6: Paragraph objects hold run objects.
Step7: Run objects have a .bold or .italics variable, identifying whether that run has that style property.
Step8: We can save style attributes using this approach.
Step9: We can save our changes by using the .save method on the document object.
Step10: Word also has document themes and styles such as 'Normal' or 'Heading 1', etc. These are accessible for paragraph objects.
Step11: This module can also be used to create blank word documents in memory using the Document() function.
Step12: We can now add paragraphs to this document object.
Step13: We can save this created document to the hard drive using the .save() method again. It will be saved here as demo4.docx.
Step14: We can also create runs within these paragraph objects.
Step15: You cannot insert paragraph and run functions anywhere but the end, but there is a way to edit files by opening a new document and mirroring the old document with changes.
Step16: We will test it on demo.docx and demo4.docx.
|
<ASSISTANT_TASK:>
Python Code:
import docx
import os
os.chdir('files')
d = docx.Document('demo.docx')
type(d)
d.paragraphs
d.paragraphs[0]
d.paragraphs[0].text
d.paragraphs[1].text
p = d.paragraphs[1]
p.runs
p.runs[0].text
p.runs[1].text
p.runs[2].text
p.runs[2].bold
p.runs[3].text
p.runs[3].underline = True
d.save('demo2.docx')
p.style = 'Title'
d.save('demo2.docx')
d = docx.Document()
d.add_paragraph('This is a paragraph.')
d.add_paragraph('This is another paragraph.')
d.save('demo4.docx')
# Select the first paragraph we created.
p = d.paragraphs[0]
# Add a new run to that paragraph.
p.add_run('This is a new run')
# Set the newly added run (Index 1) to a True bold variable
p.runs[1].bold = True
# Save changes
d.save('demo4.docx')
import docx
# Create a function to get text from a file
def getText(filename):
    """Return the full text of a .docx file, with paragraphs joined by newlines."""
    doc = docx.Document(filename)
    # One entry per paragraph; join reproduces the document's line structure.
    return '\n'.join(para.text for para in doc.paragraphs)
print(getText('demo.docx'))
print(getText('demo4.docx'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 1
Step2: Create a MarkovChain instance
Step3: Classification of states
Step4: Determine the communication classes
Step5: Classify the states of this Markov chain
Step6: Obtain a list of the recurrent states
Step7: Obtain a list of the transient states
Step8: A Markov chain is reducible (i.e., its directed graph is not strongly connected)
Step9: This Markov chain is aperiodic
Step10: Indeed, each of the sub-chains corresponding to the recurrent classes has period $1$,
Step11: Stationary distributions
Step12: These are indeed stationary distributions
Step14: Plot these distributions.
Step15: Simulation
Step16: As is clear from the transition matrix P,
Step17: You can observe that the chain stays in the recurrent class ${1,4}$
Step18: Note on reproducibility
Step20: Time series averages
Step21: Here is a frequency distribution along a sample path, of length 100,
Step22: Length 10,000
Step23: The distribution becomes close to the stationary distribution (0, 1/3, 0, 0, 2/3, 0).
Step24: Start with state 2,
Step25: Run the above cell several times;
Step26: In fact,
Step28: Cross sectional averages
Step29: Start with state 1
Step30: The distribution is close to the stationary distribution (0, 1/3, 0, 0, 2/3, 0).
Step31: Starting with a transient state 2
Step32: Observe that the distribution is close to a convex combination of
Step33: Since our Markov chain is aperiodic (i.e., every recurrent class is aperiodic),
Step34: Powers of $P$
Step35: In the canonical form
Step36: Observe that the first three rows, which correspond to the recurrent states,
Step37: This Markov chain is irreducible
Step38: This Markov chain is periodic
Step39: Its period, which we denote by $d$
Step40: Identify the cyclic classes
Step41: Cyclic normal form
Step42: Re-define the Markov chain with the above matrix Q
Step43: Obtain the block components $P_0, \cdots, P_{d-1}$
Step44: $P^d$ is block diagonal
Step45: The $i$th diagonal block of $P^d$ equals $P_i P_{i+1} \cdots P_{d-1} P_0 \cdots P_{i-1}$
Step46: Stationary distributions
Step47: Obtain the stationary distributions $\pi^0, \ldots, \pi^{d-1}$
Step48: Verify that $\pi^{i+1} = \pi^i P_i$
Step49: Verify that $\pi = (\pi^0 + \cdots + \pi^{d-1})/d$
Step50: Powers of $P$
Step51: Print $P^1, P^2, \ldots, P^d$
Step52: Print $P^{2d}$, $P^{4d}$, and $P^{6d}$
Step53: $P^{kd}$ converges as $k \to \infty$ to a matrix that contains $\pi^0, \ldots, \pi^{d-1}$.
Step54: But $P^i$ itself does not converge.
Step55: Observe that the distribution is close to the (unique) stationary distribution $\pi$.
Step56: Next, plot the simulated marginal distributions
Step57: Compare these with the rows of $P^{10d+1}, \ldots, P^{10d+d}$.
Step58: If $\varepsilon = 0$,
Step59: If $\varepsilon > 0$ but small, the chain is irreducible,
Step60: Analytically, the unique stationary distribution of the chain with $\varepsilon > 0$
Step61: The output becomes farther from the actual stationary distribution (1/3, 1/3, 1/3)
Step62: MarkovChain in quantecon employs
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from quantecon.markov import MarkovChain
P = np.zeros((6, 6))
P[0, 0] = 1
P[1, 4] = 1
P[2, [2, 3, 4]] = 1/3
P[3, [0, 5]] = 1/2
P[4, [1, 4]] = 1/2
P[5, [0, 3]] = 1/2
print(P)
mc1 = MarkovChain(P)
mc1.is_irreducible
mc1.num_communication_classes
mc1.communication_classes
mc1.recurrent_classes
recurrent_states = np.concatenate(mc1.recurrent_classes)
print(recurrent_states)
transient_states = np.setdiff1d(np.arange(mc1.n), recurrent_states)
print(transient_states)
permutation = np.concatenate([recurrent_states, transient_states])
print(mc1.P[permutation, :][:, permutation])
mc1.is_aperiodic
for recurrent_class in mc1.recurrent_classes:
sub_matrix = P[recurrent_class, :][:, recurrent_class]
d = MarkovChain(sub_matrix).period
print('Period of the sub-chain\n{0}\n = {1}'.format(sub_matrix, d))
print(mc1.stationary_distributions)
print(mc1.stationary_distributions.dot(mc1.P))
def draw_histogram(distribution, ax=None, figsize=None,
                   title=None, xlabel=None, ylabel=None, ylim=(0, 1)):
    """Plot the given distribution as a bar chart over its state indices.

    If *ax* is None, a new figure is created and shown; otherwise the bars
    are drawn onto the supplied axes and showing is left to the caller.
    """
    # BUG FIX: the original re-tested `ax is None` at the end, but `ax` had
    # just been reassigned by plt.subplots(), so plt.show() was unreachable.
    # Capture the decision before the reassignment.
    own_figure = ax is None
    if own_figure:
        fig, ax = plt.subplots(figsize=figsize)
    n = len(distribution)
    ax.bar(np.arange(n), distribution, align='center')
    ax.set_xlim(-0.5, (n-1)+0.5)
    ax.set_ylim(*ylim)
    if title:
        ax.set_title(title)
    if xlabel:
        ax.set_xlabel(xlabel)
    if ylabel:
        ax.set_ylabel(ylabel)
    if own_figure:
        plt.show()
fig, axes = plt.subplots(1, 2, figsize=(12, 4))
titles = ['Stationary distribution for the recurrent class {0}'.format(recurrent_class)
for recurrent_class in mc1.recurrent_classes]
for ax, title, dist in zip(axes, titles, mc1.stationary_distributions):
draw_histogram(dist, ax=ax, title=title, xlabel='States')
fig.suptitle('Stationary distributions', y=-0.05, fontsize=12)
plt.show()
mc1.simulate(50, init=0)
mc1.simulate(50, init=1)
mc1.simulate(50)
mc1.simulate(50, random_state=12345)
def time_series_dist(mc, t, init=None, random_state=None):
    """Return the distribution of visits by a sample path of length t
    of mc with an initial state init.

    If t is a scalar, a single length-mc.n distribution is returned;
    if t is array-like, one row per entry of t is returned (all prefixes
    are taken from the same simulated path of the maximal length).
    """
    t_max = np.max(t)
    dim = 1
    try:
        ts_size = len(t)  # t is an array of path lengths
        ts_array = t
        dim = 2
    except TypeError:  # t is a single int (len() is undefined)
        ts_size = 1
        ts_array = [t]
    # Simulate once at the maximal length; shorter lengths reuse its prefix.
    X = mc.simulate(ts_length=t_max, init=init, random_state=random_state)
    dists = np.empty((ts_size, mc.n))
    bins = np.arange(mc.n + 1)
    for i, length in enumerate(ts_array):
        hist, _ = np.histogram(X[:length], bins=bins)
        dists[i, :] = hist / length
    return dists[0] if dim == 1 else dists
time_series_dist(mc1, t=100, init=1)
time_series_dist(mc1, t=10**4, init=1)
def plot_time_series_dists(mc, init, ts, seed=None, figsize=(12,4)):
    """Draw one time-series visit-frequency histogram per path length in *ts*."""
    visit_freqs = time_series_dist(mc, t=ts, init=init, random_state=seed)
    fig, axes = plt.subplots(1, len(ts), figsize=figsize)
    for ax, t, dist in zip(axes, ts, visit_freqs):
        draw_histogram(dist, ax=ax, title='t={0}'.format(t), xlabel='States')
    fig.suptitle('Time series distributions with init={0}'.format(init),
                 y=-0.05, fontsize=12)
    plt.show()
init = 1
ts = [5, 10, 50, 100]
plot_time_series_dists(mc1, init, ts)
init = 2
ts = [5, 10, 50, 100]
plot_time_series_dists(mc1, init, ts)
init = 2
ts = [5, 10, 50, 100]
seeds = [222, 2222]
descriptions = ['{0} sample path with init={1}'.format(adjective, init) for adjective
in ['Some'] + ['Another'] + ['Yet another']*(len(seeds)-1)]
for seed, description in zip(seeds, descriptions):
print(description)
plot_time_series_dists(mc1, init, ts, seed=seed)
inits = [3, 4, 5]
t = 100
fig, axes = plt.subplots(1, 3, figsize=(12, 3))
for init, ax in zip(inits, axes):
draw_histogram(time_series_dist(mc1, t=t, init=init), ax=ax,
title='Initial state = {0}'.format(init),
xlabel='States')
fig.suptitle('Time series distributions for t={0}'.format(t),
y=-0.05, fontsize=12)
plt.show()
def cross_sectional_dist(mc, T, init=None, num_reps=10**4, random_state=None):
    """Return the distribution of visits at time T by num_reps times of simulation
    of mc with an initial state init.

    If T is a scalar, a single length-mc.n distribution is returned;
    if T is array-like, one row per entry of T is returned.
    """
    T_max = np.max(T)
    dim = 1
    try:
        len(T)  # T is an array of times
        Ts_array = T
        dim = 2
    except TypeError:  # T is a single int (len() is undefined)
        Ts_array = [T]
    # Simulate num_reps independent paths, then keep only the columns at
    # the requested times (ts_length=T_max+1 so that index T_max exists).
    x = mc.simulate(ts_length=T_max+1, init=init, num_reps=num_reps,
                    random_state=random_state)[:, Ts_array]
    dists = np.empty((x.shape[-1], mc.n))
    bins = np.arange(mc.n + 1)
    for i in range(x.shape[-1]):
        hist, _ = np.histogram(x[:, i], bins=bins)
        dists[i, :] = hist / num_reps
    return dists[0] if dim == 1 else dists
init = 1
T = 10
cross_sectional_dist(mc1, init=init, T=T)
T = 100
cross_sectional_dist(mc1, init=init, T=T)
def plot_cross_sectional_dists(mc, init, Ts, num_reps=10**4, seed=None,
                               figsize=(12,4)):
    """Draw one cross-sectional distribution histogram per time point in *Ts*."""
    dists = cross_sectional_dist(mc, T=Ts, init=init, num_reps=num_reps,
                                 random_state=seed)
    fig, axes = plt.subplots(1, len(Ts), figsize=figsize)
    for ax, T, dist in zip(axes, Ts, dists):
        draw_histogram(dist, ax=ax, title='T={0}'.format(T), xlabel='States')
    fig.suptitle('Cross sectional distributions with init={0}'.format(init),
                 y=-0.05, fontsize=12)
    plt.show()
init = 1
Ts = [2, 3, 5, 10]
plot_cross_sectional_dists(mc1, init, Ts)
init = 2
T = 10
cross_sectional_dist(mc1, init=init, T=T)
T = 100
dist = cross_sectional_dist(mc1, init=init, T=T)
dist
draw_histogram(dist,
title='Cross sectional distribution at T={T} with init={init}'
.format(T=T, init=init),
xlabel='States')
init = 2
Ts = [2, 3, 5, 10]
plot_cross_sectional_dists(mc1, init, Ts)
inits = [3, 4, 5]
T = 10
fig, axes = plt.subplots(1, 3, figsize=(12, 3))
for init, ax in zip(inits, axes):
draw_histogram(cross_sectional_dist(mc1, T=T, init=init), ax=ax,
title='Initial state = {0}'.format(init),
xlabel='States')
fig.suptitle('Cross sectional distribution at T={0}'.format(T),
y=-0.05, fontsize=12)
plt.show()
np.set_printoptions(suppress=True) # Suppress printing with floating point notation
Ts = [10, 20, 30]
for T in Ts:
print('P^{T} =\n{P_T}'.format(T=T, P_T=np.linalg.matrix_power(mc1.P, T)))
Q = mc1.P[permutation, :][:, permutation]
print('Q =\n{Q}'.format(Q=Q))
for T in Ts:
print('Q^{T} =\n{Q_T}'.format(T=T, Q_T=np.linalg.matrix_power(Q, T)))
P = np.zeros((10, 10))
P[0, 3] = 1
P[1, [0, 4]] = 1/2
P[2, 6] = 1
P[3, [1, 2, 7]] = 1/3
P[4, 3] = 1
P[5, 4] = 1
P[6, 3] = 1
P[7, [6, 8]] = 1/2
P[8, 9] = 1
P[9, 5] = 1
np.set_printoptions(precision=3) # Reduce the number of digits printed
print(P)
mc2 = MarkovChain(P)
mc2.is_irreducible
mc2.is_aperiodic
d = mc2.period
print(d)
mc2.cyclic_classes
permutation = np.concatenate(mc2.cyclic_classes)
Q = mc2.P[permutation, :][:, permutation]
print(Q)
mc2 = MarkovChain(Q)
P_blocks = []
for i in range(d):
P_blocks.append(mc2.P[mc2.cyclic_classes[i%d], :][:, mc2.cyclic_classes[(i+1)%d]])
print('P_{i} =\n{P_block}'.format(i=i, P_block=P_blocks[i]))
P_power_d = np.linalg.matrix_power(mc2.P, d)
print(P_power_d)
P_power_d_blocks = []
ordinals = ['0th', '1st', '2nd']
for i in range(d):
P_power_d_blocks.append(P_power_d[mc2.cyclic_classes[i], :][:, mc2.cyclic_classes[i]])
print('{ordinal} diagonal block of P^d =\n{P_power_d_block}'
.format(ordinal=ordinals[i], P_power_d_block=P_power_d_blocks[i]))
products = []
for i in range(d):
R = np.eye(P_blocks[i].shape[0])
string = ''
for j in range(d):
R = R.dot(P_blocks[(i+j)%d])
string += 'P_{0} '.format((i+j)%d)
products.append(R)
print(string + '=\n{R}'.format(R=R))
for matrix0, matrix1 in zip(P_power_d_blocks, products):
print(np.array_equal(matrix0, matrix1))
len(mc2.stationary_distributions)
pi = mc2.stationary_distributions[0]
print(pi)
draw_histogram(pi,
title='Stationary distribution', xlabel='States', ylim=(0, 0.35))
pi_s = []
for i in range(d):
pi_s.append(MarkovChain(P_power_d_blocks[i]).stationary_distributions[0])
print('pi^{i} =\n{pi_i}'.format(i=i, pi_i=pi_s[i]))
fig, axes = plt.subplots(1, d, figsize=(12, 3))
for i, ax in enumerate(axes):
pi_i_full_dim = np.zeros(mc2.n)
pi_i_full_dim[mc2.cyclic_classes[i]] = pi_s[i]
draw_histogram(pi_i_full_dim,
ax=ax, title='$\pi^{i}$'.format(i=i), xlabel='States')
fig.suptitle('Stationary distributions for the diagonal blocks',
y=-0.05, fontsize=12)
plt.show()
for i in range(d):
print('pi^{i} P_{i} =\n{dot}'.format(i=i, dot=np.dot(pi_s[i], P_blocks[i])))
# Right hand side of the above identity
rhs = np.zeros(mc2.n)
for i in range(d):
rhs[mc2.cyclic_classes[i]] = pi_s[i]
rhs /= d
print(rhs)
np.allclose(pi, rhs)
np.set_printoptions(suppress=True) # Suppress printing with floating point notation
for i in range(1, d+1):
print('P^{i} =\n{P_i}'.format(i=i, P_i=np.linalg.matrix_power(mc2.P, i)))
for i in [k*d for k in [2, 4, 6]]:
print('P^{i} =\n{P_i}'.format(i=i, P_i=np.linalg.matrix_power(mc2.P, i)))
for i in range(10*d+1, 10*d+1+d):
print('P^{i} =\n{P_i}'.format(i=i, P_i=np.linalg.matrix_power(mc2.P, i)))
init = 0
dist = time_series_dist(mc2, init=init, t=10**4)
print(dist)
draw_histogram(dist,
title='Time series distribution with init={0}'.format(init),
xlabel='States', ylim=(0, 0.35))
plt.show()
print(pi)
init = 0
k = 10
Ts = [k*d + 1 + i for i in range(2*d)]
num_reps = 10**2
dists = cross_sectional_dist(mc2, T=Ts, init=init, num_reps=num_reps)
fig, axes = plt.subplots(2, d, figsize=(12, 6))
for dist, T, ax in zip(dists, Ts, axes.flatten()):
draw_histogram(dist, ax=ax,
title='T = {T}'.format(T=T))
fig.suptitle('Cross sectional distributions with init={init}'.format(init=init),
y=0.05, fontsize=12)
plt.show()
def P_epsilon(eps, p=0.5):
    """3x3 transition matrix: states 0 and 1 swap with prob p, and each row
    leaks probability eps (2*eps from state 2) to the remaining state(s)."""
    row0 = [1 - (p + eps), p, eps]
    row1 = [p, 1 - (p + eps), eps]
    row2 = [eps, eps, 1 - 2 * eps]
    return np.array([row0, row1, row2])
# Compare the unperturbed chain (eps = 0, two recurrent classes) with tiny
# perturbations: any eps > 0 yields a unique stationary distribution, but
# computing it accurately for very small eps is numerically delicate.
P_epsilon(0)
MarkovChain(P_epsilon(0)).recurrent_classes
P_epsilon(0.001)
MarkovChain(P_epsilon(0.001)).recurrent_classes
np.set_printoptions(precision=15) # Increase the number of digits printed
# Epsilons around and below machine precision, where generic eigenvector
# routines start to break down.
epsilons = [float('1e-{i}'.format(i=i)) for i in range(12, 18)]
for eps in epsilons:
    print('epsilon = {eps}'.format(eps=eps))
    # Stationary distribution as the left eigenvector of P (right eigenvector
    # of P.T) for the largest eigenvalue, normalized to sum to one.
    w, v = np.linalg.eig(P_epsilon(eps).T)
    i = w.argmax()
    print(v[:, i]/v[:, i].sum())
import scipy.linalg
for eps in epsilons:
    print('epsilon = {eps}'.format(eps=eps))
    # Same computation via scipy's left-eigenvector interface, for comparison.
    w, v = scipy.linalg.eig(P_epsilon(eps), left=True, right=False)
    i = w.argmax()
    print(v[:, i]/v[:, i].sum())
for eps in epsilons + [1e-100]:
    print('epsilon = {eps}'.format(eps=eps))
    # MarkovChain's own solver — presumably a specialized method that remains
    # accurate even for eps far below machine epsilon; confirm upstream.
    print(MarkovChain(P_epsilon(eps)).stationary_distributions[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: factorplot and FacetGrid
|
<ASSISTANT_TASK:>
Python Code:
# Column names for the UCI auto-mpg dataset (the raw file has no header row).
names = [
    'mpg',
    'cylinders',
    'displacement',
    'horsepower',
    'weight',
    'acceleration',
    'model_year',
    'origin',
    'car_name',
]
# Reading the file and assigning the header.  The separator is a regex, so a
# raw string is used: '\s+' relies on '\s' being an unrecognized escape and
# triggers a SyntaxWarning on modern Python, while r'\s+' is the same string.
df = pd.read_csv(
    "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data",
    sep=r'\s+', names=names)
# The first token of the car name is the manufacturer.
df['maker'] = df.car_name.map(lambda x: x.split()[0])
# Recode the numeric origin column as region labels.
df.origin = df.origin.map({1: 'America', 2: 'Europe', 3: 'Asia'})
# The file marks missing values with '?'; convert those to NaN and drop them.
# NOTE(review): DataFrame.applymap is deprecated in pandas >= 2.1 in favor of
# DataFrame.map — kept here for compatibility with the notebook's pandas.
df = df.applymap(lambda x: np.nan if x == '?' else x).dropna()
df['horsepower'] = df.horsepower.astype(float)
df.head()
# Point plot of mpg by model year.  NOTE(review): factorplot was renamed
# catplot in seaborn >= 0.9; this notebook targets the older API.
sns.factorplot(data=df, x="model_year", y="mpg")
# The same plot, faceted into one column per region of origin.
sns.factorplot(data=df, x="model_year", y="mpg", col="origin")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's take a look at what we made
Step2: Not sorted (default)
Step3: Sort it
Step4: Sorting using key argument
Step5: Extracting Datetimes
Step6: Put into a Pandas DataFrame and Plot
Step7: Visualizing Behavior with signature
Step8: This array has shape ('Number of Networks', 'Number of frequency points'). It can be visualized with imshow; most of the code below just adds labels and axis scales.
Step9: This process is automated with the method NetworkSet.signature(). It even has a vs_time parameter which will automatically create the DateTime index from the Network's names, if they were written by rf.now_string()
|
<ASSISTANT_TASK:>
Python Code:
from time import sleep
import skrf as rf
# IPython magic: render matplotlib figures inline in the notebook.
%matplotlib inline
from pylab import *
rf.stylely()
# Start from a clean scratch directory for the generated touchstone files.
!rm -rf tmp
!mkdir tmp
wg = rf.wr10 # just a dummy media object to generate data
wg.frequency.npoints = 101
# Write 10 random one-port networks, each named with a timestamp so the file
# names encode creation time; scaling S-parameters by k makes them distinct.
for k in range(10):
    # timestamp generated with `rf.now_string()`
    ntwk = wg.random(name=rf.now_string()+'.s1p')
    ntwk.s = k*ntwk.s
    ntwk.write_touchstone(dir='tmp')
    sleep(.1)  # ensure consecutive timestamps differ
# IPython automagic: list the files just written.
ls tmp
# Load the directory into a NetworkSet (rf.NS — presumably shorthand for
# rf.NetworkSet; both are used below on the same directory).
ns = rf.NS.from_dir('tmp')
ns.ntwk_set  # default (unsorted) order
ns.sort()
ns.ntwk_set  # now sorted by name, i.e. chronologically
# Reload and sort with a custom key: the name without its '.s1p' extension.
ns = rf.NetworkSet.from_dir('tmp')
ns.sort(key = lambda x: x.name.split('.')[0])
ns.ntwk_set
ns.sort()
# Convert each network's timestamped name back into a datetime object.
dt_idx = [rf.now_string_2_dt(k.name ) for k in ns]
dt_idx
import pandas as pd
# Index the measurements by the timestamps recovered from the file names.
dates = pd.DatetimeIndex(dt_idx)
# create a function to pull out S11 in degrees at a specific frequency
s_deg_at = lambda s:{s: [k[s].s_deg[0,0,0] for k in ns]}
# One curve per frequency, all drawn onto the current axes.
for f in ['80ghz', '90ghz','100ghz']:
    df =pd.DataFrame(s_deg_at(f), index=dates)
    df.plot(ax=gca())
title('Phase Evolution in Time')
ylabel('S11 (deg)')
# Stack every network's S11 magnitude in dB into a matrix of shape
# (number of networks, number of frequency points) for image display.
mat = array([k.s_db.flatten() for k in ns])
mat.shape
freq = ns[0].frequency
# creates x and y scales: frequency along x, network index along y
extent = [freq.f_scaled[0], freq.f_scaled[-1], len(ns), 0]
# make the image
imshow(mat, aspect='auto', extent=extent, interpolation='nearest')
# label things
grid(0)
freq.labelXAxis()
ylabel('Network #')
cbar = colorbar()
cbar.set_label('Magnitude (dB)')  # fixed user-facing typo: was 'Magntidue (dB)'
# NetworkSet.signature() automates the plot above; vs_time=True rebuilds the
# DateTime axis from network names written by rf.now_string().
ns.signature(component='s_db', vs_time=True, cbar_label='Magnitude (dB)')
<END_TASK>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.