| Unnamed: 0 (int64, 0 to 15.9k) | cleaned_code (string, lengths 67 to 124k, nullable) | cleaned_prompt (string, lengths 168 to 30.3k, nullable) |
|---|---|---|
7,300
|
<ASSISTANT_TASK:>
Python Code:
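# Assumed imports for this cell (not shown in the original):
import binascii
import numpy as np
# hexprint is never defined in this cell; a minimal sketch, assuming it
# packs the bit array into bytes and prints them as hex:
def hexprint(bits):
    print(binascii.hexlify(np.packbits(bits[:bits.size // 8 * 8])).decode())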
bits = np.array([0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 
1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],\
dtype = 'uint8')
hexprint(bits)
data = bits[16:16+161*8-16*8]
hexprint(data)
def reflect_bytes(x):
    # reverse the bit order within each byte (LSB-first <-> MSB-first)
    return np.fliplr(x[:x.size//8*8].reshape((-1,8))).ravel()
data_rev = reflect_bytes(data)
hexprint(data_rev)
def destuff(x):
    # HDLC-style bit de-stuffing: drop the 0 inserted after five consecutive 1s
    y = list()
    run = 0
    for i, bit in enumerate(x):
        if run == 5:
            if bit == 1:
                print('Long run found at bit', i)
                break
            else:
                run = 0
        elif bit == 0:
            run = 0
            y.append(bit)
        elif bit == 1:
            run += 1
            y.append(bit)
    return np.array(y, dtype = 'uint8')
data_rev_destuff = destuff(data_rev)
1193/8
hexprint(data_rev_destuff)
def descramble(x):
    # multiplicative descrambler for the G3RUH polynomial 1 + x**12 + x**17
    y = np.concatenate((np.zeros(17, dtype='uint8'), x))
    z = y[:-17] ^ y[5:-12] ^ y[17:]
    return z
def nrzi_decode(x):
    # NRZ-I: a transition encodes a 0, no transition encodes a 1
    return x ^ np.concatenate((np.zeros(1, dtype = 'uint8'), x[:-1])) ^ 1
data_descrambled = descramble(data_rev_destuff)
hexprint(data_descrambled)
data_nrz = nrzi_decode(data_descrambled)
hexprint(data_nrz)
data_nrz_rev = reflect_bytes(data_nrz)
hexprint(data_nrz_rev)
raw_input_bits = np.array([1,1,0,1,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,0,1,0,1,0,0,0,1,0,1,1,1,0,1,0,1,0,1,1,0,0,0,1,0,0,1,0,0,0,1,1,1,0,0,0,1,0,1,1,0,0,0,1,0,0,1,0,1,1,0,0,0,0,1,1,1,0,0,0,1,0,1,0,0,0,1,1,1,1,1,0,1,0,1,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,0,0,1,1,1,0,0,1,1,0,0,1,1,1,0,1,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,1,0,1,1,0,1,0,1,0,0,1,0,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,1,1,0,0,1,0,1,0,0,0,0,0,1,0,1,1,0,0,0,0,1,0,1,0,0,0,0,1,0,1,0,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,1,0,1,1,1,0,1,0,0,0,0,1,1,1,1,1,0,0,0,1,1,1,0,1,1,1,0,0,1,1,1,1,1,0,1,1,1,0,1,0,0,0,1,0,1,1,0,1,0,1,0,1,0,0,0,1,1,0,1,1,0,0,1,1,0,0,0,1,0,1,0,0,0,0,1,1,0,1,0,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,1,0,1,0,0,0,1,1,0,0,1,1,0,1,1,1,1,1,0,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,1,0,0,1,1,0,0,1,0,0,0,1,0,0,0,0,1,1,1,0,1,1,0,0,1,1,0,0,1,0,1,0,0,1,1,1,0,1,1,0,1,0,1,1,0,0,0,0,1,1,0,1,0,0,0,0,0,1,1,1,0,0,0,1,1,1,0,0,1,0,0,1,1,1,0,0,1,0,1,1,0,1,0,1,1,1,0,1,0,1,0,0,0,1,0,0,1,1,0,0,0,1,1,1,1,0,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,1,1,0,0,1,0,1,1,0,1,1,1,0,1,0,0,1,0,0,0,0,1,1,0,0,0,1,1,0,0,1,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,1,1,0,0,1,1,1,0,0,1,0,0,1,0,0,0,0,1,0,0,0,0,1,1,0,1,1,0,0,1,1,0,1,0,1,0,0,0,0,0,1,0,1,0,0,1,1,1,0,1,0,1,0,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,1,0,0,1,1,1,0,1,1,0,0,0,0,1,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,1,0,0,1,1,1,1,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,1,0,0,0,1,0,0,1,1,1,0,0,1,0,1,0,1,1,0,1,0,0,1,1,1,1,0,1,1,1,0,1,0,1,0,1,1,1,1,1,0,1,0,1,1,0,1,0,0,0,1,1,0,0,0,0,1,0,0,1,1,0,0,0,1,0,0,1,1,1,0,1,1,1,1,0,1,0,1,0,1,1,1,0,1,1,0,0,0,1,0,1,1,1,0,1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,1,0,1,1,0,1,1,0,0,0,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,0,0,1,1,0,0,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,0,1,0,1,0,1,0,1,1,1,1,1,0,0,1,1,0,0,0,0,1,1,0,1,1,0,1,1,1,1,0,0,0,0,1,0,1,1,1,1,0,0,1,0,1,1,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,0,1,0,1,1,0,1,0,1,1,1,1,0,0,1,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,1,0,1,1,1,1,0,1,0,1,1,1,0,1,1,0,1,1,0,1,1,1,0,1,1,0,0,1,0,1,0,0,0,1,1,1,1,1,0,0,1,0,1,1,1,1,1,1,0,1,1,0,1,1,1,0,1,1,1,0,0,1,1,0,1,1,1,1,1,0,1,0,0,1,1,0,1,0,1,1,1,0,1,1,1,1,1,1,0,0,1,1,0,1,0,1,1,1,0,1,0,0,1,0,0,0,0,0,1,0,1,0,0,1,1,0,1,1,1,1,1,0,1,0,0,1,1,1,1,1,0,0,1,1,1,1,1,0,0,0,0,1,1,0,1,1,0,1,0,0,1,1,0,1,1,1,0,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,1,1,1,0,0,1,0,1,1,0,0,0,1,1,1,1,1,0,0,0,1,0,0,0,0,1,1,0,0,0,0,1,1,1,1,0,0,1,1,1,0,0,0,0,0,1,1,1,0,0,1,0,0,1,1,1,1,1,1,1,1,0,0,1,0,1,1,1,0,0,1,0,1,1,0,0,0,0,1,1,0,0,0,1,0,1,1,0,1,0,0,0,0,1,0,0,0,1,1,0,1,1,0,1,0,1,0,1,1,1,1,0,0,0,0,0,0,0,1,0,0,1,0,0,1,1,0,0,1,0,1,0,1,0,1,1,0,1,0,0,1,1,0,0,0,1,1,0,1,1,0,0,1,0,0,0,1,0,1,0,1,1,0,0,0,0,1,1,0,1,0,0,0,1,0,1,0,0,1,0,0,1,1,1,1,1,0,0,1,1,0,1,1,1,0,1,0,1,0,0,1,1,1,1,0,1,1,1,0,0], dtype = 'uint8')
hexprint(raw_input_bits)
raw_input_bits.size//8
raw_input_stream = 'D3F8 0EA2 EAC4 8E2C 4B0E 28FA 9020 C733 A658 CB52 EF01 9416 1429 18E2 E87C 773E E8B5 46CC 50D0 CDCA 337D 1B83 3726 443B 329D AC34'
#input_stream = np.unpackbits(np.frombuffer(binascii.a2b_hex(raw_input_stream.replace(' ','')), dtype='uint8'))
input_stream = raw_input_bits
input_stream_reflected = reflect_bytes(input_stream)
hexprint(input_stream_reflected)
input_stream_reflected_no_rs = input_stream_reflected[:-16*8]
input_stream_reflected_no_rs.size//8
after_unstuffing = destuff(input_stream_reflected_no_rs)
hexprint(after_unstuffing)
after_unstuffing.size/8
after_derandom = nrzi_decode(descramble(after_unstuffing))
hexprint(reflect_bytes(after_derandom))
after_derandom.size
reflect_bytes(after_derandom).size/8
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Trim the data between the 0x7e7e flags. We skip Reed-Solomon decoding, since we are confident that there are no bit errors. We remove the 16 Reed-Solomon parity check bytes.
Step2: Reverse the bytes in the data.
Step3: Perform bit de-stuffing.
Step4: That is interesting: we have found a run of ones longer than 5 inside the data. We wouldn't expect such a run, since bit stuffing should prevent it. This happens during one byte of a total of 161 data bytes.
Step5: Perform G3RUH descrambling.
Step6: Perform NRZ-I decoding.
Step7: The long sequences of zeros are a good indicator, but we still don't have the expected 8A A6 8A 9E 40 40 60 92 AE 68 88 AA 98 61 AX.25 header.
Step8: The CRC is CRC16_CCITT_ZERO following the notation of this online calculator.
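A minimal bitwise sketch of this CRC variant (assuming polynomial 0x1021, initial value 0x0000, and no bit reflection, which is what the name CRC16_CCITT_ZERO implies; this helper is not part of the original notebook):
def crc16_ccitt_zero(data):
    crc = 0x0000
    for byte in data:
        crc ^= byte << 8
        for _ in range(8):
            # shift left; XOR in the polynomial when the top bit is set
            if crc & 0x8000:
                crc = ((crc << 1) ^ 0x1021) & 0xFFFF
            else:
                crc = (crc << 1) & 0xFFFF
    return crc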
Step9: They have CC64 rather than ec 64 near the end. Why?
Step10: Here we have 18 3d instead of 1839 near the end.
Step11: For some reason we needed to do something odd with the start of the descrambler (changing the byte alignment) and reflect the bytes again to get something matching their example.
|
7,301
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pymc3 as pm
import pandas as pd
import seaborn as sns
sns.set(color_codes=True)
np.random.seed(20090425)
drug = (101,100,102,104,102,97,105,105,98,101,100,123,105,103,100,95,102,106,
        109,102,82,102,100,102,102,101,102,102,103,103,97,97,103,101,97,104,
        96,103,124,101,101,100,101,101,104,100,101)
placebo = (99,101,100,101,102,100,97,101,104,101,102,102,100,105,88,101,100,
           104,100,100,100,101,102,103,97,101,101,100,101,99,101,100,100,
           101,100,99,101,100,102,99,100,99)
y1 = np.array(drug)
y2 = np.array(placebo)
y = pd.DataFrame(dict(value=np.r_[y1, y2], group=np.r_[['drug']*len(drug), ['placebo']*len(placebo)]))
y.hist('value', by='group');
μ_m = y.value.mean()
μ_s = y.value.std() * 2
with pm.Model() as model:
    group1_mean = pm.Normal('group1_mean', μ_m, sd=μ_s)
    group2_mean = pm.Normal('group2_mean', μ_m, sd=μ_s)
σ_low = 1
σ_high = 10
with model:
    group1_std = pm.Uniform('group1_std', lower=σ_low, upper=σ_high)
    group2_std = pm.Uniform('group2_std', lower=σ_low, upper=σ_high)
with model:
    ν = pm.Exponential('ν_minus_one', 1/29.) + 1
sns.distplot(np.random.exponential(30, size=10000), kde=False);
with model:
    λ1 = group1_std**-2
    λ2 = group2_std**-2
    group1 = pm.StudentT('drug', nu=ν, mu=group1_mean, lam=λ1, observed=y1)
    group2 = pm.StudentT('placebo', nu=ν, mu=group2_mean, lam=λ2, observed=y2)
with model:
    diff_of_means = pm.Deterministic('difference of means', group1_mean - group2_mean)
    diff_of_stds = pm.Deterministic('difference of stds', group1_std - group2_std)
    effect_size = pm.Deterministic('effect size',
                                   diff_of_means / np.sqrt((group1_std**2 + group2_std**2) / 2))
with model:
    trace = pm.sample(2000, njobs=3)
pm.plot_posterior(trace[100:],
                  varnames=['group1_mean', 'group2_mean', 'group1_std', 'group2_std', 'ν_minus_one'],
                  color='#87ceeb');
pm.plot_posterior(trace[1000:],
                  varnames=['difference of means', 'difference of stds', 'effect size'],
                  ref_val=0,
                  color='#87ceeb');
pm.forestplot(trace[1000:], varnames=[v.name for v in model.vars])
pm.summary(trace[1000:],
           varnames=['difference of means', 'difference of stds', 'effect size'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The first step in a Bayesian approach to inference is to specify the full probability model that corresponds to the problem. For this example, Kruschke chooses a Student-t distribution to describe the distributions of the scores in each group. This choice adds robustness to the analysis, as a T distribution is less sensitive to outlier observations, relative to a normal distribution. The three-parameter Student-t distribution allows for the specification of a mean $\mu$, a precision (inverse-variance) $\lambda$ and a degrees-of-freedom parameter $\nu$
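In the code above this is the pm.StudentT likelihood: each group's scores are modeled as $y \sim \mathrm{StudentT}(\nu, \mu, \lambda)$, with a shared $\nu$ and a per-group $\mu$ and $\lambda$.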
Step2: The group standard deviations will be given a uniform prior over a plausible range of values for the variability of the outcome variable, IQ.
Step3: We follow Kruschke by making the prior for $\nu$ exponentially distributed with a mean of 30; this allocates high prior probability over the regions of the parameter that describe the range from normal to heavy-tailed data under the Student-T distribution.
Step4: Since PyMC3 parameterizes the Student-T in terms of precision, rather than standard deviation, we must transform the standard deviations before specifying our likelihoods.
Step5: Having fully specified our probabilistic model, we can turn our attention to calculating the comparisons of interest in order to evaluate the effect of the drug. To this end, we can specify deterministic nodes in our model for the difference between the group means and the difference between the group standard deviations. Wrapping them in named Deterministic objects signals to PyMC that we wish to record the sampled values as part of the output.
Step6: Now, we can fit the model and evaluate its output.
Step7: We can plot the stochastic parameters of the model. PyMC's plot_posterior function replicates the informative histograms portrayed in Kruschke (2012). These summarize the posterior distributions of the parameters, and present a 95% credible interval and the posterior mean. The plots below are constructed with the final 1000 samples from each of the 2 chains, pooled together.
Step8: Looking at the group differences, we can conclude that there are meaningful differences between the two groups for all three measures. For these comparisons, it is useful to use zero as a reference value (ref_val); providing this reference value yields cumulative probabilities for the posterior distribution on either side of the value. Thus, for the difference in means, 99.4% of the posterior probability is greater than zero, which suggests the group means are credibly different. The effect size and differences in standard deviation are similarly positive.
Step9: When forestplot is called on a trace with more than one chain, it also plots the potential scale reduction parameter, which is used to reveal evidence for lack of convergence; values near one, as we have here, suggest that the model has converged.
|
7,302
|
<ASSISTANT_TASK:>
Python Code:
def fahr_to_celsius(temp):
    return ((temp - 32) * (5/9))
fahr_to_celsius(32)
print('freezing point of water:', fahr_to_celsius(32), 'C')
print('boiling point of water:', fahr_to_celsius(212), 'C')
def celsius_to_kelvin(temp_c):
    return temp_c + 273.15
print('freezing point of water in Kelvin:', celsius_to_kelvin(0.))
def fahr_to_kelvin(temp_f):
    temp_c = fahr_to_celsius(temp_f)
    temp_k = celsius_to_kelvin(temp_c)
    return temp_k
print('boiling point of water in Kelvin:', fahr_to_kelvin(212.0))
# the next line fails with a NameError: temp_k is local to fahr_to_kelvin
print('Again, temperature in Kelvin was:', temp_k)
temp_kelvin = fahr_to_kelvin(212.0)
print('temperature in Kelvin was:', temp_kelvin)
def print_temperatures():
    print('temperature in Fahrenheit was:', temp_fahr)
    print('temperature in Kelvin was:', temp_kelvin)
temp_fahr = 212.0
temp_kelvin = fahr_to_kelvin(temp_fahr)
print_temperatures()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The function definition opens with the keyword def followed by the name of the function (fahr_to_celsius) and a parenthesized list of parameter names (temp). The body of the function — the statements that are executed when it runs — is indented below the definition line. The body concludes with a return keyword followed by the return value.
Step2: Now that we’ve seen how to turn Fahrenheit into Celsius, we can also write the function to turn Celsius into Kelvin
Step3: What about converting Fahrenheit to Kelvin? We could write out the formula, but we don’t need to. Instead, we can compose the two functions we have already created
Step4: This is our first taste of how larger programs are built
Step5: If you want to reuse the temperature in Kelvin after you have calculated it with fahr_to_kelvin, you can store the result of the function call in a variable
Step6: The variable temp_kelvin, being defined outside any function, is said to be global.
|
7,303
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
# load both sheets as new dataframes
shows_df = pd.read_csv("show_category.csv")
views_df = pd.read_excel("views.xls")
shows_df.head()
shows_df = shows_df.set_index('showname')
shows_df.head()
views_df.head()
views_df = views_df.set_index('viewer_id')
views_df.head() # note that we can have repeating viewer_id values (they're non-unique)
# we can select out the column to work on, then use the built-in str (string) functions
# to replace hyphens (we do this and just print the result to screen)
views_df['show_watched'].str.replace("-", "")
# now we do the fix in-place
views_df['show_watched'] = views_df['show_watched'].str.replace("-", "")
# NOTE if you comment out the line above, you'll get a NaN in the final table
# as `battle-star` won't be joined
views_df
print("Index info:", views_df.index)
views_df.ix[22] # select the items with index 22 (note this is an integer, not string value)
shows_views_df = views_df.join(shows_df, on='show_watched')
shows_views_df
# take out two relevant columns, group by category, sum the views
shows_views_df[['views', 'category']].groupby('category').sum()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Change the index to be the showname
Step2: Do the same for views
Step3: Join on shows watched against category
|
7,304
|
<ASSISTANT_TASK:>
Python Code:
def parseRaw(json_map):
    url = json_map['url']
    content = json_map['html']
    return (url, content)
import json
import pprint
import sys  # needed by parseImgSrc's error handler below
pp = pprint.PrettyPrinter(indent=2)
path = "./pixnet.txt"
all_content = sc.textFile(path).map(json.loads).map(parseRaw)
def parseImgSrc(x):
    urls = list()  # moved outside the try so the final return never hits an undefined name
    try:
        import lxml.html
        from urlparse import urlparse
        node = lxml.html.fromstring(x)
        root = node.getroottree()
        for src in root.xpath('//img/@src'):
            try:
                host = urlparse(src).netloc
                if '.' not in host: continue
                if host.count('.') == 1:
                    pass
                else:
                    host = host[host.index('.')+1:]
                urls.append('imgsrc_' + host)
            except:
                print "Error Parse At:", src
        for src in root.xpath('//input[@src]/@src'):
            try:
                host = urlparse(src).netloc
                if '.' not in host: continue
                if host.count('.') == 1:
                    pass
                else:
                    host = host[host.index('.')+1:]
                urls.append('imgsrc_' + host)
            except:
                print "Error parseImgSrc At:", src
    except:
        print "Unexpected error:", sys.exc_info()
    return urls
all_content.map(lambda x: x[1]).first()[:100]
image_list = all_content.map(lambda x :parseImgSrc(x[1]))
pp.pprint(image_list.first()[:10])
img_src_count = all_content.map(
    lambda x: parseImgSrc(x[1])).flatMap(
    lambda x: x).countByValue()
for i in img_src_count:
    print i, ':', img_src_count[i]
from operator import add
all_content.map(
lambda x :parseImgSrc(x[1])).flatMap(lambda x: x).map(lambda x: (x,1)).reduceByKey(add).sortBy(
lambda x: x[1], ascending=False).collect()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the raw data
Step2: Use the LXML parser to analyze the article structure
Step3: Extract the list of image src hosts
Step4: Tally the list of image src hosts
Step5:
|
7,305
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
matplotlib.style.use('seaborn')
from animerec.data import get_data
users, anime = get_data()
from sklearn.model_selection import train_test_split
train, test = train_test_split(users, test_size = 0.1) #let's split up the dataset into a train and test set.
train, valid = train_test_split(train, test_size = 0.2) #let's split up the dataset into a train and valid set.
from animerec.data import remove_users
train = remove_users(train, 10)
#define validation set
valid_users = valid['user_id']
valid_anime = valid['anime_id']
valid_ratings = valid['rating']
#initialize some local variables
nUsers = len(train.user_id.unique())
nAnime = len(train.anime_id.unique())
# we'll need some data structures in order to vectorize computations
from collections import defaultdict
user_ids = train.user_id
item_ids = train.anime_id
user_index = defaultdict(lambda: -1) # maps a user_id to the index in the bias term.
item_index = defaultdict(lambda: -1) # maps an anime_id to the index in the bias term.
counter = 0
for user in user_ids:
    if user_index[user] == -1:
        user_index[user] = counter
        counter += 1
counter = 0
for item in item_ids:
    if item_index[item] == -1:
        item_index[item] = counter
        counter += 1
import tensorflow as tf
y = tf.cast(tf.constant(train['rating'].as_matrix(), shape=[len(train),1]), tf.float32)
def objective(alpha, Bi, Bu, y, lam):
    # construct the full items and user matrix.
    Bi_full = tf.gather(Bi, train.anime_id.map(lambda _id: item_index[_id]).as_matrix())
    Bu_full = tf.gather(Bu, train.user_id.map(lambda _id: user_index[_id]).as_matrix())
    alpha_full = tf.tile(alpha, (len(train), 1))
    return tf.reduce_sum(abs(alpha_full+Bi_full+Bu_full-y)) + lam * (tf.reduce_sum(Bi**2) + tf.reduce_sum(Bu**2))
#initialize alpha, Bi, Bu
alpha = tf.Variable(tf.constant([6.9], shape=[1, 1]))
Bi = tf.Variable(tf.constant([0.0]*nAnime, shape=[nAnime, 1]))
Bu = tf.Variable(tf.constant([0.0]*nUsers, shape=[nUsers, 1]))
optimizer = tf.train.AdamOptimizer(0.01)
obj = objective(alpha, Bi, Bu, y, 1)
trainer = optimizer.minimize(obj)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
tLoss = []
vLoss = []
prev = 10e10
for iteration in range(500):
    cvalues = sess.run([trainer, obj])
    print("objective = " + str(cvalues[1]))
    tLoss.append(cvalues[1])
    if not iteration % 5:
        cAlpha, cBi, cBu, cLoss = sess.run([alpha, Bi, Bu, obj])
        indices = valid_users.map(lambda x: user_index[x])
        bu = indices.map(lambda x: 0.0 if x == -1 else float(cBu[x]))
        indices = valid_anime.map(lambda x: item_index[x])
        bi = indices.map(lambda x: 0.0 if x == -1 else float(cBi[x]))
        preds = bu + bi + float(cAlpha)
        MAE = 1.0/len(valid) * sum(abs(valid_ratings-preds))
        vLoss.append(MAE)
        if MAE > prev: break
        else: prev = MAE
cAlpha, cBi, cBu, cLoss = sess.run([alpha, Bi, Bu, obj])
print("\nFinal train loss is ", cLoss)
fig, ax1 = plt.subplots()
plt.title('Linear model performance vs. iterations')
ax1.plot(tLoss, 'b-')
ax1.set_xlabel('Training Iterations')
ax1.set_ylabel('Train Loss')
ax2 = ax1.twinx()
ax2.plot(range(0, len(vLoss)*5, 5), vLoss, 'r.')
ax2.set_ylabel('Validation Classification MAE')
fig.tight_layout()
test_users = test['user_id']
test_anime = test['anime_id']
test_ratings = test['rating']
indices = test_users.map(lambda x: user_index[x])
bu = indices.map(lambda x: 0.0 if x == -1 else float(cBu[x]))
indices = test_anime.map(lambda x: item_index[x])
bi = indices.map(lambda x: 0.0 if x == -1 else float(cBi[x]))
preds = bu + bi + float(cAlpha)
MAE = 1.0/len(test) * sum(abs(test_ratings-preds))
print ('MAE on test set is: ', float(MAE))
sess.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's plot the objective and see how it decreases.
Step2: So by 50 iterations, the model hits a bend and from there we see incremental improvement. We can see quite clearly that this model does not overfit. It's a very simple model (that likely underfits the data) and we use L2 regularization, so to that end it's not surprising at all. But it's good to confirm anyway.
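For reference, the objective defined in the code is $\sum_{(u,i)} \left|\alpha + b_u + b_i - r_{ui}\right| + \lambda \left( \sum_i b_i^2 + \sum_u b_u^2 \right)$: an L1 data term with L2 regularization on the user and item biases.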
|
7,306
|
<ASSISTANT_TASK:>
Python Code:
x = [ [2] * 3 ] * 3
x[0][0] = "ZZ"
print(*x, sep="\n")
out=[[0]*3]*3
print( id(out[0]) )
print( id(out[1]) ) # want to know what "id" is? Why not read the documentation!
a = [2] * 3
x = [a] * 3
print(*x, sep="\n")
print()
a[0] = "ZZ"
print(*x, sep="\n")
x = []
for i in range(3):
    x.append([2]*3)
print(*x, sep="\n")
print()
x[0][0] = "ZZ"
print(*x, sep="\n")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What I wanted to do was build a nested list; x is supposed to look like [[2, 2, 2], [2, 2, 2], [2, 2, 2]], and after x[0][0] = "ZZ" only the first sublist should change: [['ZZ', 2, 2], [2, 2, 2], [2, 2, 2]].
Step2: To see what's happening lets rewrite the code to make the issue even clearer
Step3: So basically the issue was that when we write [2]*3 and multiply the outer list by 3, Python isn't making three separate lists; rather, it's adding the same list three times!
|
7,307
|
<ASSISTANT_TASK:>
Python Code:
import pathlib # for filepath path tooling
import lzma # to decompress the iCOM file
import numpy as np # for array tooling
import matplotlib.pyplot as plt # for plotting
# Makes it so that any changes in pymedphys is automatically
# propagated into the notebook without needing a kernel reset.
from IPython.lib.deepreload import reload
%load_ext autoreload
%autoreload 2
import pymedphys
patient_id = '015112'
icom_directory = pathlib.Path(r'\\physics-server\iComLogFiles\patients')
monaco_directory = pathlib.Path(r'\\monacoda\FocalData\RCCC\1~Clinical')
output_directory = pathlib.Path(r'S:\Physics\Patient Specific Logfile Fluence')
pdf_directory = pathlib.Path(r'P:\Scanned Documents\RT\PhysChecks\Logfile PDFs')
GRID = pymedphys.mudensity.grid()
COORDS = (GRID["jaw"], GRID["mlc"])
GAMMA_OPTIONS = {
    'dose_percent_threshold': 2,  # Not actually comparing dose though
    'distance_mm_threshold': 0.5,
    'local_gamma': True,
    'quiet': True,
    'max_gamma': 2,
}
all_tel_paths = list(monaco_directory.glob(f'*~{patient_id}/plan/*/tel.1'))
all_tel_paths
plan_names_to_choose_from = [
    path.parent.name for path in all_tel_paths
]
plan_names_to_choose_from
icom_deliveries = list(icom_directory.glob(f'{patient_id}_*/*.xz'))
icom_deliveries
icom_files_to_choose_from = [
    path.stem for path in icom_deliveries
]
icom_files_to_choose_from
monaco_plan_name = 'LeftIlium1' # plan directory name
icom_delivery = '20200213_133208' # iCOM timestamp
tel_path = list(monaco_directory.glob(f'*~{patient_id}/plan/{monaco_plan_name}/tel.1'))[-1]
tel_path
icom_path = list(icom_directory.glob(f'{patient_id}_*/{icom_delivery}.xz'))[-1]
icom_path
with lzma.open(icom_path, 'r') as f:
    icom_stream = f.read()
# Print out available methods and attributes on the Delivery object
[command for command in dir(pymedphys.Delivery) if not command.startswith('_')]
delivery_icom = pymedphys.Delivery.from_icom(icom_stream)
delivery_tel = pymedphys.Delivery.from_monaco(tel_path)
mudensity_icom = delivery_icom.mudensity()
mudensity_tel = delivery_tel.mudensity()
def to_tuple(array):
    return tuple(map(tuple, array))
gamma = pymedphys.gamma(
    COORDS,
    to_tuple(mudensity_tel),
    COORDS,
    to_tuple(mudensity_icom),
    **GAMMA_OPTIONS
)
def plot_gamma_hist(gamma, percent, dist):
    valid_gamma = gamma[~np.isnan(gamma)]
    plt.hist(valid_gamma, 50, density=True)
    pass_ratio = np.sum(valid_gamma <= 1) / len(valid_gamma)
    plt.title(
        "Local Gamma ({0}%/{1}mm) | Percent Pass: {2:.2f} % | Max Gamma: {3:.2f}".format(
            percent, dist, pass_ratio * 100, np.max(valid_gamma)
        )
    )
def plot_and_save_results(
    mudensity_tel,
    mudensity_icom,
    gamma,
    png_filepath,
    pdf_filepath,
    header_text="",
    footer_text="",
):
    diff = mudensity_icom - mudensity_tel
    largest_item = np.max(np.abs(diff))
    widths = [1, 1]
    heights = [0.3, 1, 1, 1, 0.1]
    gs_kw = dict(width_ratios=widths, height_ratios=heights)
    fig, axs = plt.subplots(5, 2, figsize=(10, 16), gridspec_kw=gs_kw)
    gs = axs[0, 0].get_gridspec()
    for ax in axs[0, 0:]:
        ax.remove()
    for ax in axs[1, 0:]:
        ax.remove()
    for ax in axs[4, 0:]:
        ax.remove()
    axheader = fig.add_subplot(gs[0, :])
    axhist = fig.add_subplot(gs[1, :])
    axfooter = fig.add_subplot(gs[4, :])
    axheader.axis("off")
    axfooter.axis("off")
    axheader.text(0, 0, header_text, ha="left", wrap=True, fontsize=30)
    axfooter.text(0, 1, footer_text, ha="left", va="top", wrap=True, fontsize=6)
    plt.sca(axs[2, 0])
    pymedphys.mudensity.display(GRID, mudensity_tel)
    axs[2, 0].set_title("Monaco Plan MU Density")
    plt.sca(axs[2, 1])
    pymedphys.mudensity.display(GRID, mudensity_icom)
    axs[2, 1].set_title("Recorded iCOM MU Density")
    plt.sca(axs[3, 0])
    pymedphys.mudensity.display(
        GRID, diff, cmap="seismic", vmin=-largest_item, vmax=largest_item
    )
    plt.title("iCOM - Monaco")
    plt.sca(axs[3, 1])
    pymedphys.mudensity.display(GRID, gamma, cmap="coolwarm", vmin=0, vmax=2)
    plt.title(
        "Local Gamma | "
        f"{GAMMA_OPTIONS['dose_percent_threshold']}%/"
        f"{GAMMA_OPTIONS['distance_mm_threshold']}mm")
    plt.sca(axhist)
    plot_gamma_hist(
        gamma,
        GAMMA_OPTIONS['dose_percent_threshold'],
        GAMMA_OPTIONS['distance_mm_threshold'])
    return fig
results_dir = output_directory.joinpath(patient_id, tel_path.parent.name, icom_path.stem)
results_dir.mkdir(exist_ok=True, parents=True)
header_text = (
    f"Patient ID: {patient_id}\n"
    f"Plan Name: {tel_path.parent.name}\n"
)
footer_text = (
    f"tel.1 file path: {str(tel_path)}\n"
    f"icom file path: {str(icom_path)}\n"
    f"results path: {str(results_dir)}"
)
png_filepath = str(results_dir.joinpath("result.png").resolve())
pdf_filepath = str(pdf_directory.joinpath(f"{patient_id}.pdf").resolve())
fig = plot_and_save_results(
    mudensity_tel, mudensity_icom,
    gamma, png_filepath, pdf_filepath,
    header_text=header_text, footer_text=footer_text
)
fig.tight_layout()
plt.savefig(png_filepath, dpi=300)
plt.show()
!magick convert "{png_filepath}" "{pdf_filepath}"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Importing PyMedPhys
Step2: Patient ID Configuration
Step3: File Path Configurations
Step4: Output directories
Step5: MU Density and Gamma configuration
Step6: Choosing from the available Monaco Plans and iCOM Deliveries
Step7: iCOM Deliveries
Step8: Plan and iCOM choice
Step9: Resulting paths found matching provided search query
Step10: Loading the iCOM file
Step11: Delivery Objects
Step12: Creating the Delivery Objects
Step13: Using the Delivery Objects
Step14: Calculating Gamma
Step15: Create Plotting and Reporting Functions
Step16: Plotting and saving the report
Step17: Converting PNG to PDF for importing into Mosaiq
|
7,308
|
<ASSISTANT_TASK:>
Python Code:
import math
import torch
import gpytorch
from matplotlib import pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
# Training data is 100 points in [0,1] inclusive regularly spaced
train_x = torch.linspace(0, 1, 100)
# True function is sin(2*pi*x) with Gaussian noise
train_y = torch.sin(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2
# We will use the simplest form of GP model, exact inference
class ExactGPModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
# initialize likelihood and model
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPModel(train_x, train_y, likelihood)
# this is for running the notebook in our testing framework
import os
smoke_test = ('CI' in os.environ)
training_iter = 2 if smoke_test else 50
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1) # Includes GaussianLikelihood parameters
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
for i in range(training_iter):
    # Zero gradients from previous iteration
    optimizer.zero_grad()
    # Output from model
    output = model(train_x)
    # Calc loss and backprop gradients
    loss = -mll(output, train_y)
    loss.backward()
    optimizer.step()
class MeanVarModelWrapper(torch.nn.Module):
    def __init__(self, gp):
        super().__init__()
        self.gp = gp

    def forward(self, x):
        output_dist = self.gp(x)
        return output_dist.mean, output_dist.variance
with torch.no_grad(), gpytorch.settings.fast_pred_var(), gpytorch.settings.trace_mode():
    model.eval()
    test_x = torch.linspace(0, 1, 51)
    pred = model(test_x)  # Do precomputation
    traced_model = torch.jit.trace(MeanVarModelWrapper(model), test_x)
with torch.no_grad():
    traced_mean, traced_var = traced_model(test_x)
print(torch.norm(traced_mean - pred.mean))
print(torch.norm(traced_var - pred.variance))
traced_model.save('traced_exact_gp.pt')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Trace the Model
Step2: Compare Predictions from TorchScript model and Torch model
|
7,309
|
<ASSISTANT_TASK:>
Python Code:
# Useful Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
bushels = 15000
spot_symbol = 'CORN'
futures_contract = symbols('CNU16')
spot_prices = get_pricing(spot_symbol, start_date = '2016-06-01', end_date = '2016-09-15', fields = 'price')
futures_prices = get_pricing(futures_contract, start_date = '2016-06-01',end_date='2016-09-15', fields='price')
# Sale date of corn
sale_date = '2016-09-14'
# Plotting
plt.plot(spot_prices);
plt.axvline(sale_date, linestyle='dashed', color='r', label='Sale Date')
plt.legend();
p = spot_prices.loc[sale_date]
market_profits = p * (bushels//6)
print 'profits from market price: $', market_profits
futures_entry_date = '2016-06-01'
futures_profits = futures_prices.loc[futures_entry_date] * bushels
print 'profits from future contract: $', futures_profits
lost_profits = futures_profits - market_profits
print 'Profits the producer lost in a year: $', int(lost_profits)
# A toy example to show Contango
N = 100 # Days to expiry of futures contract
cost_of_carry = 0.01
spot_price = pd.Series(np.ones(N), name = "Spot Price")
futures_price = pd.Series(np.ones(N), name = "Futures Price")
spot_price[0] = 1000
futures_price[0] = spot_price[0]*np.exp(cost_of_carry*N)
for n in range(1, N):
    spot_price[n] = spot_price[n-1]*(1 + np.random.normal(0, 0.05))
    futures_price[n] = spot_price[n]*np.exp(cost_of_carry*(N - n))
spot_price.plot()
futures_price.plot()
plt.legend()
plt.title('Contango')
plt.xlabel('Time')
plt.ylabel('Price');
# A toy example to show Backwardation
N = 100 # Days to expiry of futures contract
cost_of_carry = -0.01
spot_price = pd.Series(np.ones(N), name = "Spot Price")
futures_price = pd.Series(np.ones(N), name = "Futures Price")
spot_price[0] = 1000
futures_price[0] = spot_price[0]*np.exp(cost_of_carry*N)
for n in range(1, N):
    spot_price[n] = spot_price[n-1]*(1 + np.random.normal(0, 0.05))
    futures_price[n] = spot_price[n]*np.exp(cost_of_carry*(N - n))
spot_price.plot()
futures_price.plot()
plt.legend()
plt.title('Backwardation')
plt.xlabel('Time')
plt.ylabel('Price');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1
Step2: We can clearly see that it would have been wiser for the farmer to sell his corn using a futures contract to lock in the price in June. Let's calculate the lost profit
Step3: Exercise 2
Step4: b. Backwardation
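Both toy examples price the future off the spot with a cost-of-carry model, $F_t = S_t e^{c(N - t)}$: with $c > 0$ the future trades above the spot (contango), and with $c < 0$ below it (backwardation).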
|
7,310
|
<ASSISTANT_TASK:>
Python Code:
def hamming_dist(s1, s2):
    # Hamming distance assumes equal-length strings; indexing s2[i] below
    # would fail if the lengths differ
    if len(s1) < len(s2):
        s1, s2 = s2, s1
    dist = 0
    for i in range(len(s1)):
        if s1[i] != s2[i]:
            dist += 1
    return dist
hamming_dist('TGCATAT','ATCCGAT')
def normalize_string(text):
    import string
    text = text.lower()
    text = text.translate(str.maketrans('', '',
                                        string.punctuation +
                                        string.whitespace))
    return text
def edit_distance(str1, str2):
    '''
    This function implements the Wagner-Fischer algorithm
    to compute the edit distance between two strings
    >>> edit_distance('bear', 'bar')
    1
    >>> edit_distance('"computer"', 'comm"uter')
    1
    >>> edit_distance('ea t', 'feat')
    1
    >>> edit_distance('bee!', 'beer')
    1
    >>> edit_distance('science', 'seance')
    3
    >>> edit_distance('laboratory', 'lobotomy')
    4
    '''
    str1 = normalize_string(str1)
    str2 = normalize_string(str2)
    lenx = len(str1) + 1
    leny = len(str2) + 1
    d = [[0 for i in range(leny)] for i in range(lenx)]
    for i in range(leny):
        d[0][i] = i
    for j in range(lenx):
        d[j][0] = j
    for j in range(1, leny):
        for i in range(1, lenx):
            if str1[i-1] == str2[j-1]:
                d[i][j] = d[i-1][j-1]
            else:
                d[i][j] = min(d[i-1][j] + 1,
                              d[i][j-1] + 1,
                              d[i-1][j-1] + 1)
    return d[lenx-1][leny-1]

if __name__ == '__main__':
    import doctest
    doctest.testmod()
edit_distance('TGCATAT','ATCCGAT')
sq1 = "TGCATAT"
sq2 = "ATCCGAT"
sq3 = ""
def LCS(seq1, seq2, penality = 0):
    from math import inf
    xaxis = len(seq1) + 1
    yaxis = len(seq2) + 1
    # initialize matrix
    mat = [[-inf] * xaxis for i in range(yaxis)]
    track = [[""] * xaxis for i in range(yaxis)]
    for i in range(yaxis):
        mat[i][0] = penality*i
    for i in range(xaxis):
        mat[0][i] = penality*i
    for x in range(1, yaxis):
        for y in range(1, xaxis):
            if seq1[y-1] == seq2[x-1]:
                mat[x][y] = mat[x-1][y-1] + 1
                track[x][y] = "diag"
            else:
                mat[x][y] = max(mat[x][y-1],
                                mat[x-1][y])
                if mat[x][y] == mat[x][y-1]:
                    track[x][y] = "left"
                else:
                    track[x][y] = "up"
    for i in mat:
        print(i)
    print()
    for i in track:
        print(i)
    return backtrack(mat, track, seq1, seq2)
def backtrack(m, t, s1, s2):
    pos = [len(s1), len(s2)]
    algn1 = ""
    algn2 = ""
    while t[pos[1]][pos[0]] != "":
        if t[pos[1]][pos[0]] == "diag":
            algn1 = s1[pos[0]-1] + algn1
            algn2 = s2[pos[1]-1] + algn2
            pos = [pos[0]-1, pos[1]-1]
        elif t[pos[1]][pos[0]] == "left":
            algn2 = "-" + algn2
            algn1 = s1[pos[0]-1] + algn1
            pos[0] -= 1
        elif t[pos[1]][pos[0]] == "up":
            algn2 = s2[pos[1]-1] + algn2
            algn1 = "-" + algn1
            pos[1] -= 1
    if s1[0] != s2[0]:
        while pos[0] != 0:
            algn2 = "-" + algn2
            algn1 = s1[pos[0]-1] + algn1
            pos[0] -= 1
        while pos[1] != 0:
            algn2 = s2[pos[1]-1] + algn2
            algn1 = "-" + algn1
            pos[1] -= 1
    print("\nOriginal sequences:")
    print(s1)
    print(s2)
    print("Alignment:")
    print(algn1)
    print(algn2)
LCS(sq1,sq2)
def globalAlignment(sequence1, sequence2):
    rows = len(sequence1) + 1
    columns = len(sequence2) + 1
    scoringMatrix = [[0] * columns for _ in range(rows)]
    directionMatrix = [["empty"] * columns for _ in range(rows)]
    for i in range(1, rows):
        for j in range(1, columns):
            option1 = scoringMatrix[i-1][j] - 1
            option2 = scoringMatrix[i][j-1] - 1
            if sequence1[i-1] == sequence2[j-1]:
                option3 = scoringMatrix[i-1][j-1] + 4
            else:
                option3 = scoringMatrix[i-1][j-1] - 2
            scoringMatrix[i][j] = max(option1, option2, option3)
            if scoringMatrix[i][j] == option3:
                directionMatrix[i][j] = "diagonal"
            elif scoringMatrix[i][j] == option2:
                directionMatrix[i][j] = "left"
            else:
                directionMatrix[i][j] = "top"
    return (scoringMatrix, directionMatrix)
sequence1 = "TATATCGTTAG"
sequence2 = "AATCTGAT"
global_result = globalAlignment(sequence1, sequence2)
global_result[0]
def localAlignment(sequence1, sequence2):
    rows = len(sequence1) + 1
    columns = len(sequence2) + 1
    scoringMatrix = [[0] * columns for _ in range(rows)]
    directionMatrix = [["empty"] * columns for _ in range(rows)]
    for i in range(1, rows):
        for j in range(1, columns):
            option1 = scoringMatrix[i-1][j] - 1
            option2 = scoringMatrix[i][j-1] - 1
            if sequence1[i-1] == sequence2[j-1]:
                option3 = scoringMatrix[i-1][j-1] + 4
            else:
                option3 = scoringMatrix[i-1][j-1] - 2
            value = max(option1, option2, option3)
            if value > 0:
                scoringMatrix[i][j] = value
                if scoringMatrix[i][j] == option3:
                    directionMatrix[i][j] = "diagonal"
                elif scoringMatrix[i][j] == option2:
                    directionMatrix[i][j] = "left"
                else:
                    directionMatrix[i][j] = "top"
            else:
                scoringMatrix[i][j] = 0
    return (scoringMatrix, directionMatrix)
sequence1 = "TATATCGTTAG"
sequence2 = "AATCTGAT"
local_result = localAlignment(sequence1, sequence2)
local_result[0]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: (b) Calculate the Edit distance between TGCATAT and ATCCGAT.
Step2: (c) Is there a unique edit distance in (b)? If not, find the minimum distance.
Step3: c) Verify the relationship between the optimal alignment score and the minimum edit distance d between two substrings u and v of lengths n and m.
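One common form of this relationship, assuming only insertions and deletions are allowed (the LCS setting above), is $d(u, v) = n + m - 2\,|\mathrm{LCS}(u, v)|$.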
|
7,311
|
<ASSISTANT_TASK:>
Python Code:
from typing import List
def string_xor(a: str, b: str) -> str:
    def xor(i, j):
        if i == j:
            return '0'
        else:
            return '1'
    return ''.join(xor(x, y) for x, y in zip(a, b))
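# Example usage (hypothetical inputs): XOR two equal-length bit strings
print(string_xor('010', '110'))  # -> '100'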
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,312
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from ecell4.prelude import *
citation(125616)
citation(168932)
with reaction_rules():
    2 * ATP > 2 * A13P2G + 2 * ADP | (3.2 * ATP / (1.0 + (ATP / 1.0) ** 4.0))
    A13P2G > A23P2G | 1500
    A23P2G > PEP | 0.15
    A13P2G + ADP > PEP + ATP | 1.57e+4
    PEP + ADP > ATP | 559
    # AMP + ATP > 2 * ADP | (1.0 * (AMP * ATP - 2.0 * ADP * ADP))
    AMP + ATP > 2 * ADP | 1.0 * AMP * ATP
    2 * ADP > AMP + ATP | 2.0 * ADP * ADP
    ATP > ADP | 1.46
m = get_model()
show(m)
y0 = {"A13P2G": 0.0005082, "A23P2G": 5.0834, "PEP": 0.020502,
      "AMP": 0.080139, "ADP": 0.2190, "ATP": 1.196867}
ret = run_simulation(100, model=m, y0=y0)
ret
import numpy
w = ret.world
sim = ode.Simulator(w, m)
numpy.array(get_stoichiometry(m.list_species(), m.reaction_rules()))
numpy.array(w.evaluate(m.reaction_rules()))
x = numpy.array(sim.values())
x
dxdt = numpy.array(sim.derivatives())
dxdt
J = numpy.array(sim.jacobian())
J
E = numpy.array(sim.elasticity())
E
S = numpy.array(sim.stoichiometry())
S
v = numpy.array(sim.fluxes())
v
numpy.isclose(dxdt, S @ v)
numpy.isclose(J, S @ E)
from ecell4.mca import *
(link_matrix, kernel_matrix, independent_list) = generate_full_rank_matrix(S)
print(link_matrix)
print(kernel_matrix)
print(independent_list)
reduced_matrix = numpy.take(S, independent_list, 0)
reduced_matrix
S == link_matrix @ reduced_matrix
0 == S @ kernel_matrix
ccc, fcc = unscaled_control_coefficients(S, E)
print(ccc)
print(fcc)
numpy.isclose(fcc @ (E @ link_matrix), 0)
numpy.isclose(numpy.take(ccc, independent_list, 0) @ (E @ link_matrix), -numpy.identity(link_matrix.shape[1]))
ccc, fcc = scaled_control_coefficients(S, E, v, x)
print(ccc)
print(fcc)
numpy.isclose(ccc.sum(axis=1), 0)
numpy.isclose(fcc.sum(axis=1), 1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A Simple Model of the Glycolysis of Human Erythrocytes
Step2: The model consists of seven reactions and is at the steady state.
Step3: Metabolic Control Analysis
Step4: First of all, get_stoichiometry gives a stoichiometry matrix from the given species and reactions as follows
Step5: The evaluate method of ode.ODEWorld returns current fluxes of the given reactions.
Step6: ode.ODESimulator has methods for the fundamental properties related to metabolic control analysis.
Step7: ode.ODESimulator also provides a stoichiometry matrix and fluxes.
Step8: These properties satisfy some relations at the steady state.
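For instance, the time derivative of the state vector satisfies $\frac{\mathrm{d}\mathbf{x}}{\mathrm{d}t} = \mathbf{S}\,\mathbf{v}$, which the code checks numerically with numpy.isclose(dxdt, S @ v).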
Step9: $\mathbf{J} = \frac{\partial}{\partial \mathbf{x}}\left(\frac{\mathrm{d}\mathbf{x}}{\mathrm{d}t}\right) = \mathbf{S}\,\frac{\partial \mathbf{v}}{\partial \mathbf{x}} = \mathbf{S}\,\mathbf{E}$
Step10: Next, the ecell4.mca submodule provides useful functions for metabolic network and control analyses.
Step11: generate_full_rank_matrix gives square matrix to be full rank. In this model, 5 out of 8 reactions are independent.
Step12: The original stoichiometry matrix can be reproduced from these reduced matrices
Step13: Finally, ecell4.mca provides two functions, unscaled_control_coefficients and scaled_control_coefficients, which calculate concentration and flux control coefficients from stoichiometry and elasticity matrices.
Step14: Unscaled control coefficients satisfies connectivity theorem
Step15: and
Step16: On the other hand, scaled control coefficients
Step17: satisfies summation theorem
Step18: and
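That is, the rows of the scaled concentration control coefficients sum to 0 and the rows of the scaled flux control coefficients sum to 1, which is exactly what the final two numpy.isclose checks confirm.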
|
7,313
|
<ASSISTANT_TASK:>
Python Code:
# Python imports
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
series1 = pd.Series([1,2,3,4])
print(series1)
df1 = pd.DataFrame([[1,2,3,4],[10,20,30,40]])
print(df1)
df1
# Rename the columns
df1.columns = ['A','B','C','D']
df1.index = ['zero','one']
df1
# Create the dataframe with the columns
df1 = pd.DataFrame([[1,2,3,4],[10,20,30,40]], columns=['A','B','C',"D"], index=['zero','one'])
df1
df1 = pd.DataFrame(np.random.randn(5,4), columns = ['A','B','C','D'], index=['zero','one','two','three','four'])
print(df1)
df1.to_csv('datafiles/pandas_df1.csv')
!ls datafiles
df2 = pd.read_csv('datafiles/pandas_df1.csv', index_col=0)
print(df2)
df1.to_hdf('datafiles/pandas_df1.h5', 'df')
!ls datafiles
df2 = pd.read_hdf('datafiles/pandas_df1.h5', 'df')
print(df2)
df2.dtypes
col1 = range(6)
col2 = np.random.rand(6)
col3 = ['zero','one','two','three','four','five']
col4 = ['blue', 'cow','blue', 'cow','blue', 'cow']
df_types = pd.DataFrame( {'integers': col1, 'floats': col2, 'words': col3, 'cow color': col4} )
print(df_types)
df_types.dtypes
df_types['cow color'] = df_types['cow color'].astype("category")
df_types.dtypes
time_stamps = pd.date_range(start='2000-01-01', end='2000-01-20', freq='D') # Define index of time stamps
df1 = pd.DataFrame(np.random.randn(20,4), columns = ['A','B','C','D'], index=time_stamps)
print(df1)
df1.head(3) # Show the first n rows, default is 5
df1.tail() # Show the last n rows
df1.columns
df1.values
df1['A'].head() # df1.A.head() is equivalent
df1.A
df1[:2]
df1.loc[:'2000-01-5',"A"] # Note that this includes the upper index
df1.iloc[:3,0] # Note that this does not include the upper index like numpy
index_timestamp = pd.Timestamp('2000-01-03') # Create a timestamp object to index
df1.at[index_timestamp,"A"] # Index using timestamp (vs string)
df1.iat[3,0]
df1.head()>0.5
df1[df1>0.5].head() # Note that the values that were 'False' are 'NaN'
df_types
bool_series = df_types['cow color'].isin(['blue'])
print(bool_series) # Show the logical indexing
df_types[bool_series] # Index where the values are true
df_types.sort_values(by="floats")
df_nan = pd.DataFrame(np.random.rand(6,2), columns = ['A','B'])
df_nan
df_nan['B'] = df_nan['B'][df_nan['B']>0.5] # index alignment leaves NaN where ['B'] <= 0.5
print(df_nan)
df_nan.isnull()
df_nan.dropna(how = 'any')
df_nan.fillna(value = -1)
df1 = pd.DataFrame(np.zeros([3,3], dtype=int))
df1
df2 = pd.concat([df1, df1], axis=0)
df2 = df2.reset_index(drop=True) # Renumber indexing
df2
newdf = pd.DataFrame({0: [1], 1:[1], 2:[1]})
print(newdf)
df3 = df2.append(newdf, ignore_index=True)
df3
left = pd.DataFrame({'numbers': ['K0', 'K1', 'K2', 'K3'],
                     'English': ['one', 'two', 'three', 'four'],
                     'Spanish': ['uno', 'dos', 'tres', 'quatro'],
                     'German': ['erste', 'zweite', 'dritte', 'vierte']})
left
right = pd.DataFrame({'numbers': ['K0', 'K1', 'K2', 'K3'],
                      'French': ['un', 'deux', 'trois', 'quatre'],
                      'Afrikaans': ['een', 'twee', 'drie', 'vier']})
right
result = pd.merge(left, right, on='numbers')
result
dfg = pd.DataFrame({'A': ['clogs', 'sandals', 'jellies']*2,
                    'B': ['socks', 'footies']*3,
                    'C': [1, 1, 1, 3, 2, 2],
                    'D': np.random.rand(6)})
dfg
dfg.pivot_table(index=['A','B'], columns=['C'], values='D')
dfg.stack()
dfg.groupby(['B']).count()
dfg.groupby(['A']).mean()
dfg['D'].mean()
dfg['D']
dfg_Ds = dfg['D'].shift(2)
dfg_Ds
dfg['D'].div(dfg_Ds )
dfg
dfg['C'].value_counts()
df_types.describe()
df_types.T
def f(x): # Define function
    return x + 1
dfg['C'].apply(f)
dfg['C'].apply(lambda x: x + 1)
dfg['A'].str.title() # Make the first letter uppercase
n = 100
X = np.linspace(0, 5, n)
Y1,Y2 = np.log((X)**2+2), np.sin(X)+2
dfp = pd.DataFrame({'X' : X, 'Y1': Y1, 'Y2': Y2})
dfp.head()
dfp.plot(x = 'X')
plt.show()
style_name = 'classic'
plt.style.use(style_name)
dfp.plot(x = 'X')
plt.title('Log($x^2$) and Sine', fontsize=16)
plt.xlabel('X Label', fontsize=16)
plt.ylabel('Y Label', fontsize=16)
plt.show()
mpl.rcdefaults() # Reset matplotlib rc defaults
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Pandas Series and DataFrame objects
Step2: Dataframes use the IPython display method to look pretty, but will show just fine when printed also. (There's a way to make all of the dataframes print pretty via the IPython.display.display method, but this isn't necessary to view the values)
Step3: Indices can be named
Step4: Data Input Output
Step5: CSV Files
Step6: hdf5 files
Step7: Data types
Step8: We can create dataframes of multiple datatypes
Step9: We can also set the 'cow color' column to a category
Step10: Indexing and Setting Data
Step11: Head and Tail
Step12: We can also separate the metadata (labels, etc) from the data, yielding a numpy-like output.
Step13: Indexing Data
Step14: Note that tab completion is enabled for column names
Step15:
Step16: Label based indexing (.loc)
Step17: Integer based indexing (.iloc)
Step18: Fast single element label indexing (.at) - fast .loc
Step19: Fast single element label indexing (.iat) - fast .iloc
Step20: Logical indexing
Step21: That matrix can then be used to index the DataFrame
Step22: Logical indexing via isin
Step23: Sorting by column
Step24: Dealing with Missing Data
Step25: Print a logical DataFrame where NaN is located
Step26: Drop all rows with NaN
Step27: Replace NaN entries
Step28: Concatenating and Merging Data
Step29: Append
Step30: SQL-like merging
Step31: Grouping Operations
Step32: Pivot Table
Step33: Stacking
Step34: Groupby
Step35: Operations on Pandas Data Objects
Step36: Rotation
Step37: Add, subtract, multiply, divide
Step38: Histogram
Step39: Describe
Step40: Transpose
Step41: Applying Any Function to Pandas Data Objects
Step42: Lambda functions may also be used
Step43: String functions
Step44: Plotting
Step45: Matplotlib styles are available too
|
7,314
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import tabulate
import pprint
import click
import numpy as np
import pandas as pd
from ray.tune.commands import *
from nupic.research.frameworks.dynamic_sparse.common.browser import *
import matplotlib.pyplot as plt
from matplotlib import rcParams
from scipy.ndimage.filters import gaussian_filter1d
%config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set(style="whitegrid")
sns.set_palette("colorblind")
exps = ['comparison_pruning_2' , 'comparison_iterative_pruning_2', 'comparison_set_2']
paths = [os.path.expanduser("~/nta/results/{}".format(e)) for e in exps]
df = load_many(paths)
df.head(5)
df.shape
df.columns
df['model'].unique()
# calculate density for each model
df.loc[df['model'] == 'PruningModel', 'density'] = df.loc[df['model'] == 'PruningModel', 'target_final_density']
df.loc[df['model'] == 'IterativePruningModel', 'density'] = df.loc[df['model'] == 'IterativePruningModel', 'target_final_density']
df.loc[df['model'] == 'SET', 'density'] = df.loc[df['model'] == 'SET', 'on_perc']
# Did any trials failed?
num_epochs = 200
df[df["epochs"]<num_epochs]["epochs"].count()
# Removing failed or incomplete trials
df_origin = df.copy()
df = df_origin[df_origin["epochs"]>=30]
df.shape
# helper functions
def mean_and_std(s):
    return "{:.3f} ± {:.3f}".format(s.mean(), s.std())

def round_mean(s):
    return "{:.0f}".format(round(s.mean()))

stats = ['min', 'max', 'mean', 'std']

def agg(columns, filter=None, round=3):
    if filter is None:
        return (df.groupby(columns)
                  .agg({'val_acc_max_epoch': round_mean,
                        'val_acc_max': stats,
                        'model': ['count']})).round(round)
    else:
        return (df[filter].groupby(columns)
                  .agg({'val_acc_max_epoch': round_mean,
                        'val_acc_max': stats,
                        'model': ['count']})).round(round)
agg(['density', 'model'])
# translate model names
rcParams['figure.figsize'] = 16, 8
sns.scatterplot(data=df, x='density', y='val_acc_max', hue='model')
sns.lineplot(data=df, x='density', y='val_acc_max', hue='model', legend=False);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and check data
Step2: ## Analysis
Step3: Plot accuracy over epochs
|
7,315
|
<ASSISTANT_TASK:>
Python Code:
class MyIter(object):
    def __init__(self, lst):
        self.lst = lst
        self.i = 0

    def __iter__(self):
        self.i = 0
        return self

    def __next__(self):
        if self.i < len(self.lst):
            nxt = self.lst[self.i]
            self.i += 1
            return nxt
        else:
            raise StopIteration
m = MyIter([1, 2, 3, 4, 5, 6])
for a in m:
    print(a)
class MyDummy(object):
    def __init__(self):
        self.lst = [1, 2, 3, 4, 5, 6]
        self.i = 0

    def __call__(self):
        ret = self.lst[self.i]
        self.i += 1
        return ret
d = MyDummy()
for a in iter(d, 3):
    print(a, end=" ")
m = MyIter([1, 2, 3, 4, 5, 6])
for a in iter(m):
    print(a, end=" ")
st = "Welcome to the city of lakes"
for a in iter(st):
    print(a, end=" ")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: iter()
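The MyDummy example uses the two-argument form iter(callable, sentinel): the callable is invoked repeatedly until it returns the sentinel value (3 here), so the loop over iter(d, 3) above prints 1 2 and then stops.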
Step2: Let's try another example; this time let's take a string.
|
7,316
|
<ASSISTANT_TASK:>
Python Code:
%%html
<video width="560" height="315" src="https://storage.googleapis.com/scanner-data/public/sample-clip.mp4?ignore_cache=1" controls />
import util
path = util.download_video()
print(path)
# Read all the frames
%matplotlib inline
import matplotlib.pyplot as plt
import cv2
from timeit import default_timer as now
print('Reading frames from video...')
start = now()
video = cv2.VideoCapture(path)
frames = []
while True:
    ret, frame = video.read()
    if not ret: break
    frames.append(frame)
print(len(frames))
video.release()
read_frame_time = now() - start
print('Time to read frames: {:.3f}s'.format(read_frame_time))
# Display the tenth frame
plt.imshow(cv2.cvtColor(frames[10], cv2.COLOR_RGB2BGR))
_ = plt.axis('off')
import numpy as np
from scipy.spatial import distance
from tqdm import tqdm
histograms = []
N = len(frames)
# Compute 3 color histograms (one for each channel) for each video frame
print('Computing color histograms...')
start = now()
for frame in tqdm(frames):
    hists = [cv2.calcHist([frame], [channel], None, [16], [0, 256])
             for channel in range(3)]
    histograms.append(hists)
compute_hist_time = now() - start
print('Time to compute histograms: {:.3f}s'.format(compute_hist_time))
# Compute differences between adjacent pairs of histograms
def compute_histogram_diffs(histograms):
    diffs = []
    for i in range(1, N):
        frame_diffs = [distance.chebyshev(histograms[i-1][channel], histograms[i][channel])
                       for channel in range(3)]
        avg_diff = np.mean(frame_diffs)
        diffs.append(avg_diff)
    return diffs
diffs = compute_histogram_diffs(histograms)
# Plot the differences
plt.rcParams["figure.figsize"] = [16, 9]
plt.xlabel("Frame number")
plt.ylabel("Difference from previous frame")
_ = plt.plot(range(1, N), diffs)
import math
WINDOW_SIZE = 500 # The size of our sliding window (how many data points to include)
OUTLIER_STDDEV = 3 # Outliers are N standard deviations away from the mean of the sliding window
def find_shot_boundaries(diffs):
    boundaries = []
    for i in range(1, N):
        window = diffs[max(i-WINDOW_SIZE, 0):min(i+WINDOW_SIZE, N)]
        if diffs[i-1] - np.mean(window) > OUTLIER_STDDEV * np.std(window):
            boundaries.append(i)
    return boundaries
boundaries = find_shot_boundaries(diffs)
print('Shot boundaries are:')
print(boundaries)
def tile(imgs, rows=None, cols=None):
    # If neither rows/cols is specified, make a square
    if rows is None and cols is None:
        rows = int(math.sqrt(len(imgs)))
    if rows is None:
        rows = (len(imgs) + cols - 1) // cols
    else:
        cols = (len(imgs) + rows - 1) // rows
    # Pad missing frames with black
    diff = rows * cols - len(imgs)
    if diff != 0:
        imgs.extend([np.zeros(imgs[0].shape, dtype=imgs[0].dtype) for _ in range(diff)])
    return np.vstack([np.hstack(imgs[i * cols:(i + 1) * cols]) for i in range(rows)])
montage = tile([frames[i] for i in boundaries])
plt.imshow(cv2.cvtColor(montage, cv2.COLOR_BGR2RGB))
_ = plt.axis('off')
from scannerpy import Client, DeviceType, PerfParams, CacheMode
from scannerpy.storage import NamedVideoStream, NamedStream
import scannertools.imgproc
sc = Client()
stream = NamedVideoStream(sc, 'example', path)
frame = sc.io.Input([stream])
histogram = sc.ops.Histogram(
frame = frame,
device = DeviceType.CPU) # Change this to DeviceType.GPU if you have a GPU
output = NamedStream(sc, 'example_hist')
output_op = sc.io.Output(sc.streams.Range(histogram, [(0, 2000)]), [output])
start = now()
sc.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite)
scanner_time = now() - start
print('Time to decode + compute histograms: {:.3f}'.format(scanner_time))
print('Scanner was {:.2f}x faster'.format((read_frame_time + compute_hist_time) / scanner_time))
from pprint import pprint
histograms = list(output.load())
# Run the same shot detection pipeline as before
diffs = compute_histogram_diffs(histograms)
boundaries = find_shot_boundaries(diffs)
montage = tile([frames[i] for i in boundaries])
plt.imshow(cv2.cvtColor(montage, cv2.COLOR_BGR2RGB))
_ = plt.axis('off')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We've set up some scripts to help you download the video in the snippet below.
Step2: Take another look at the video and see if you can identify when shots change. Our shot segmentation algorithm uses the following intuition
Step3: This plot shows, for each frame, the difference between its color histograms and the previous frame's color histograms. Try playing around with the number of histogram bins as well as the distance metric. As you can see, there are a number of sharp peaks interspersed throughout the video that likely correspond to shot boundaries. We can run a sliding window over the above graph to find the peaks
Step4: And we've done it! The video is now segmented into shots. At this point, you're probably wondering
Step5: In Scanner, all data is organized into streams, or lazy lists of elements. Videos are streams where each element is a frame. We can create a stream from a video by defining a NamedVideoStream pointing to the video path. The name allows Scanner to store some metadata about the video in a local database that we use to optimize video decode at runtime.
Step6: Computations in Scanner are defined in a data-parallel manner--that is, you write a computation that takes in one (or a few) frames at a time, and then the Scanner runtime runs your computation in parallel across your video. Here, we define a computation that computes a color histogram for each frame in the video. This is done by defining a series of "ops" (operators, similar to TensorFlow)
|
7,317
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from ipp_macro_series_parser.agregats_transports.parser_cleaner_prix_carburants import prix_mensuel_carburants_90_15
from openfisca_france_indirect_taxation.examples.utils_example import graph_builder_carburants
prix_mensuel_carburants_90_15[['annee', 'mois']] = prix_mensuel_carburants_90_15[['annee', 'mois']].astype(str)
prix_mensuel_carburants_90_15['date'] = \
prix_mensuel_carburants_90_15['annee'] + '_' + prix_mensuel_carburants_90_15['mois']
prix_mensuel_carburants_90_15 = prix_mensuel_carburants_90_15.set_index('date')
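# For reference, the same year/month labels could be parsed into a real
# DatetimeIndex (a sketch; kept separate so the original string index is untouched):
import pandas as pd
date_index = pd.to_datetime(prix_mensuel_carburants_90_15['annee'] + '-' + prix_mensuel_carburants_90_15['mois'])
print(date_index[:3])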
prix_mensuel_carburants_90_15.rename(columns = {'diesel_ht': 'prix diesel ht', 'diesel_ttc': 'prix diesel ttc',
'super_95_ht': 'prix SP95 ht', 'super_95_ttc': 'prix SP95 ttc'},
inplace = True)
print('Evolution of fuel prices between 1990 and 2015')
graph_builder_carburants(
prix_mensuel_carburants_90_15[['prix SP95 ttc', 'prix diesel ttc', 'prix SP95 ht', 'prix diesel ht']],
'prix carburants', 0.39, 1.025, 'darkgreen', 'darkred', 'lawngreen', 'orangered')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use the date as the index
Step2: Rename the variables to make them more explicit
Step3: Build the graph
|
7,318
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.image import imread as read_png  # matplotlib._png was removed; matplotlib.image.imread reads PNGs the same way
# load the image
imageFileName = '../data/figure_3/3d_data/images/rough.png'
imRead = read_png(imageFileName)
# and plot
fig = plt.figure(figsize=(9, 5))
ax = fig.add_subplot(111)
ax.imshow(imRead)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.set_frame_on(False)
# load the image
imageFileName = '../data/figure_3/3d_data/images/truncated_cone.png'
imRead = read_png(imageFileName)
# and plot
fig = plt.figure(figsize=(9, 5))
ax = fig.add_subplot(111)
ax.imshow(imRead)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.set_frame_on(False)
# define colours used in plots
dark_purple = '#8464c5'
light_purple = '#ededfb'
dark_green = '#336433'
light_green = '#a0d9a0'
white = '#FFFFFF'
olive = '#aaa460'
def base_plot(mz, mz_original, H_up):
"""
Function to plot the mz vs. H hysteresis curves.
Adds colour shading to the different regions occurring throughout the hysteresis.
Requires the values of mz, dmdH_scaled and H, the array of field steps on the hysteresis loop.
The plot is returned.
"""
# create the figure and define an axis parameter.
fig = plt.figure(figsize=(9, 5))
ax = fig.add_subplot(111)
# plot mz vs. H values.
ax.plot(H_up[0:400], mz[0:400], 'r--', linewidth=2.5, label="Distorted")
ax.plot(H_up[0:400], mz_original[0:400], 'b-.', linewidth=2.5, label="Original")
# add axis labels
plt.ylabel(r'm$_{\mathrm{z}}$', fontsize=20)
plt.xlabel(r'H ($\times$M$_{\mathrm{s}}$)', fontsize=20)
plt.xticks([-4, -3 ,-2, -1, 0, 1, 2, 3, 4], fontsize=18)
plt.yticks([-1, -0.5, 0, 0.5, 1], fontsize=18)
plt.xlim([-3, 3])
# add the legend
plt.legend(loc='lower right', fontsize=16)
plt.tight_layout()
return plt, ax
mx, my, mz_original, energy = np.load('../data/figure_1/hysteresis_loops/sim_hysteresis_FeGe_nanodisk_d150_h70.npy')
mx, my, mz, energy = np.load('../data/figure_3/hysteresis_loops/sim_hysteresis_FeGe_rough_disk_d150_h70.npy')
# create arrays for the Zeeman field
H_up = np.linspace(-4, 4, 400, endpoint=False)
plt, ax = base_plot(mz, mz_original, H_up)
plt.savefig('pdfs/figure-3-rough-comparison.pdf')
plt.show()
mx, my, mz_original, energy = np.load('../data/figure_1/hysteresis_loops/sim_hysteresis_FeGe_nanodisk_d150_h70.npy')
mx, my, mz, energy = np.load('../data/figure_3/hysteresis_loops/sim_hysteresis_FeGe_cutcone_d150_d140_h70.npy')
# create arrays for the Zeeman field
H_up = np.linspace(-4, 4, 400, endpoint=False)
plt, ax = base_plot(mz, mz_original, H_up)
plt.savefig('pdfs/figure-3-tapered-cylinder-comparison.pdf')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Figure 3(i)
Step2: Figure 3(ii)
Step4: Hysteresis Data
Step5: The plots are produced below, showing the results for t=70nm. The original data from the smooth, regular cylinder is also included in the plot for comparison.
Step6: Hysteresis of tapered cylinder
|
7,319
|
<ASSISTANT_TASK:>
Python Code:
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from dnn_app_utils_v2 import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
# Example of a picture
index = 13
plt.imshow(train_x_orig[index])
print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")
# Explore your dataset
m_train = train_x_orig.shape[0]
num_px = train_x_orig.shape[1]
m_test = test_x_orig.shape[0]
print ("Number of training examples: " + str(m_train))
print ("Number of testing examples: " + str(m_test))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_x_orig shape: " + str(train_x_orig.shape))
print ("train_y shape: " + str(train_y.shape))
print ("test_x_orig shape: " + str(test_x_orig.shape))
print ("test_y shape: " + str(test_y.shape))
# Reshape the training and test examples
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten/255.
test_x = test_x_flatten/255.
print ("train_x's shape: " + str(train_x.shape))
print ("test_x's shape: " + str(test_x.shape))
### CONSTANTS DEFINING THE MODEL ####
n_x = 12288 # num_px * num_px * 3
n_h = 7
n_y = 1
layers_dims = (n_x, n_h, n_y)
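# Sanity check on these dimensions: initialize_parameters(n_x, n_h, n_y) should give
# W1 of shape (n_h, n_x), b1 (n_h, 1), W2 (n_y, n_h) and b2 (n_y, 1).
print("Expected parameter shapes: W1 ({}, {}), b1 ({}, 1), W2 ({}, {}), b2 ({}, 1)".format(n_h, n_x, n_h, n_y, n_h, n_y))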
# GRADED FUNCTION: two_layer_model
def two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):
"""
Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (n_x, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- dimensions of the layers (n_x, n_h, n_y)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- If set to True, this will print the cost every 100 iterations
Returns:
parameters -- a dictionary containing W1, W2, b1, and b2
"""
np.random.seed(1)
grads = {}
costs = [] # to keep track of the cost
m = X.shape[1] # number of examples
(n_x, n_h, n_y) = layers_dims
# Initialize parameters dictionary, by calling one of the functions you'd previously implemented
### START CODE HERE ### (≈ 1 line of code)
parameters = initialize_parameters(n_x, n_h, n_y)
### END CODE HERE ###
# Get W1, b1, W2 and b2 from the dictionary parameters.
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID. Inputs: "X, W1, b1". Output: "A1, cache1, A2, cache2".
### START CODE HERE ### (≈ 2 lines of code)
A1, cache1 = linear_activation_forward(X, W1, b1, "relu")
A2, cache2 = linear_activation_forward(A1, W2, b2, "sigmoid")
### END CODE HERE ###
# Compute cost
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(A2, Y)
### END CODE HERE ###
# Initializing backward propagation
dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))
# Backward propagation. Inputs: "dA2, cache2, cache1". Outputs: "dA1, dW2, db2; also dA0 (not used), dW1, db1".
### START CODE HERE ### (≈ 2 lines of code)
dA1, dW2, db2 = linear_activation_backward(dA2, cache2, "sigmoid")
dA0, dW1, db1 = linear_activation_backward(dA1, cache1, "relu")
### END CODE HERE ###
# Set grads['dWl'] to dW1, grads['db1'] to db1, grads['dW2'] to dW2, grads['db2'] to db2
grads['dW1'] = dW1
grads['db1'] = db1
grads['dW2'] = dW2
grads['db2'] = db2
# Update parameters.
### START CODE HERE ### (approx. 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
# Retrieve W1, b1, W2, b2 from parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
parameters = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True)
predictions_train = predict(train_x, train_y, parameters)
predictions_test = predict(test_x, test_y, parameters)
### CONSTANTS ###
layers_dims = [12288, 20, 7, 5, 1] # 5-layer model
# GRADED FUNCTION: L_layer_model
def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):#lr was 0.009
"""
Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
Arguments:
X -- data, numpy array of shape (number of examples, num_px * num_px * 3)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
print_cost -- if True, it prints the cost every 100 steps
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
np.random.seed(1)
costs = [] # keep track of cost
# Parameters initialization.
### START CODE HERE ###
parameters = initialize_parameters_deep(layers_dims)
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
### START CODE HERE ### (≈ 1 line of code)
AL, caches = L_model_forward(X, parameters)
### END CODE HERE ###
# Compute cost.
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(AL, Y)
### END CODE HERE ###
# Backward propagation.
### START CODE HERE ### (≈ 1 line of code)
grads = L_model_backward(AL, Y, caches)
### END CODE HERE ###
# Update parameters.
### START CODE HERE ### (≈ 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)
pred_train = predict(train_x, train_y, parameters)
pred_test = predict(test_x, test_y, parameters)
print_mislabeled_images(classes, test_x, test_y, pred_test)
## START CODE HERE ##
my_image = "my_image.jpg" # change this to the name of your image file
my_label_y = [1] # the true class of your image (1 -> cat, 0 -> non-cat)
## END CODE HERE ##
fname = "images/" + my_image
image = np.array(Image.open(fname))  # scipy.ndimage.imread was removed; PIL gives the same array
my_image = np.array(Image.open(fname).resize((num_px, num_px))).reshape((num_px*num_px*3, 1))  # scipy.misc.imresize was removed
my_predicted_image = predict(my_image, my_label_y, parameters)
plt.imshow(image)
print ("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2 - Dataset
Step2: The following code will show you an image in the dataset. Feel free to change the index and re-run the cell multiple times to see other images.
Step3: As usual, you reshape and standardize the images before feeding them to the network. The code is given in the cell below.
Step5: $12,288$ equals $64 \times 64 \times 3$ which is the size of one reshaped image vector.
Step6: Run the cell below to train your parameters. See if your model runs. The cost should be decreasing. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below; if not, click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
Step7: Expected Output
Step8: Expected Output
Step10: Expected Output
Step11: You will now train the model as a 5-layer neural network.
Step12: Expected Output
Step13: <table>
Step14: Expected Output
Step15: A few types of images the model tends to do poorly on include
|
7,320
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import sys
import pandas as pd
import sqlalchemy as sa
import pudl
import warnings
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.handlers = [handler]
import matplotlib.pyplot as plt
import matplotlib as mpl
%matplotlib inline
plt.style.use('ggplot')
mpl.rcParams['figure.figsize'] = (10,4)
mpl.rcParams['figure.dpi'] = 150
pd.options.display.max_columns = 56
pudl_settings = pudl.workspace.setup.get_defaults()
ferc1_engine = sa.create_engine(pudl_settings['ferc1_db'])
pudl_engine = sa.create_engine(pudl_settings['pudl_db'])
pudl_out_orig = pudl.output.pudltabl.PudlTabl(pudl_engine, freq=None)
gens_eia860_orig = pudl_out_orig.gens_eia860()
gens_eia860_orig.sample(10)
pudl.validate.plot_vs_bounds(gens_eia860_orig, pudl.validate.gens_eia860_vs_bound)
pudl.validate.plot_vs_self(gens_eia860_orig, pudl.validate.gens_eia860_self)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get the original EIA 860 data
Step2: Validation Against Fixed Bounds
Step3: Capacity
Step4: Validating Historical Distributions
|
7,321
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

class Events():
    def __init__(self, start_times, labels):
        """last item must be sentinel with no label"""
        assert(len(labels) >= len(start_times) - 1)
        if len(labels) < len(start_times):
            labels = list(labels) + [np.nan]
        self._df = pd.DataFrame({'start': start_times, 'label': labels}, columns=['start', 'label'])
    def df(self):
        return self._df
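# The helpers below (segments_from_events, events_from_segments, plot_segments) are
# referenced later but not defined in this excerpt; these are minimal sketches of
# plausible implementations, assuming segments are DataFrames with
# 'start', 'end', 'duration' and 'label' columns.
def segments_from_events(start_times, labels=None):
    df = pd.DataFrame({'start': start_times[:-1],
                       'end': start_times[1:]},
                      columns=['start', 'end'])
    df['duration'] = df['end'] - df['start']
    df['label'] = list(labels)[:len(df)] if labels is not None else np.nan
    return df

def events_from_segments(segments):
    # Accept either a Segments object or a raw DataFrame.
    df = segments.df() if hasattr(segments, 'df') else segments
    return np.hstack([df['start'].values, df['end'].values[-1:]])

def plot_segments(df):
    # Draw each labeled segment as a horizontal bar with its label in the middle.
    fig, ax = plt.subplots(figsize=(8, 1.5))
    for _, row in df.iterrows():
        ax.barh(0, row['duration'], left=row['start'], edgecolor='w')
        if pd.notna(row['label']):
            ax.text(row['start'] + row['duration'] / 2, 0, str(row['label']),
                    ha='center', va='center')
    ax.set_yticks([])
    plt.show()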
class Segments():
    def __init__(self, start_times, labels):
        """last item must be sentinel with NaN label"""
        self._df = segments_from_events(start_times, labels)
    def df(self):
        return self._df
    def join(self, other):
        sentinel_value = '_END_'
        def add_sentinel(df):
            last_event = df[-1:]
            return df.append(pd.DataFrame({
                'start': last_event['end'],
                'end': last_event['end'],
                'duration': 0.0,
                'label': sentinel_value
            }, columns=last_event.columns))
        def remove_sentinel(df, cols):
            for col in cols:
                df[col] = df[col].apply(lambda v: np.nan if v == sentinel_value else v)
        self_df = add_sentinel(self.df())[['start', 'label']].set_index('start')
        other_df = add_sentinel(other.df())[['start', 'label']].set_index('start')
        joined_df = self_df.join(other_df, lsuffix='_left', rsuffix='_right', how='outer')
        joined_df.fillna(method='ffill', inplace=True)
        remove_sentinel(joined_df, ['label_right', 'label_left'])
        joined_df['label_equals'] = joined_df['label_left'] == joined_df['label_right']
        joined_df.reset_index(inplace=True)
        joined_df['end'] = joined_df['start'].shift(-1)
        joined_df['duration'] = joined_df['end'] - joined_df['start']
        joined_df = joined_df[:-1]
        return joined_df  # Segments(joined_df['start'], joined_df['label'])
annotations = Segments(np.array([0, 1, 2, 3, 3.5, 4]), ['A','B','A','C','A'])
annotations.df()
plot_segments(annotations.df())
estimations = Segments(np.array([0, 0.9, 1.8, 2.5, 3.1, 3.4, 4.5]), ['A','B','A','B','C','A'])
estimations.df()
plot_segments(estimations.df())
def join_segments(df1, df2):
    """Joins two dataframes with segments into a single one (ignoring labels)"""
    return np.hstack([events_from_segments(df1), events_from_segments(df2)])
events = np.hstack([events_from_segments(annotations), events_from_segments(estimations)])
events.sort()
events = np.unique(events)
events
merged = segments_from_events(events)
merged
plot_segments(merged)
merged_df = annotations.join(estimations)
merged_df
def chord_symbol_recall(pred_segments, true_segments):
merged_df = pred_segments.join(true_segments)
return merged_df[merged_df['label_equals']]['duration'].sum() / merged_df['duration'].sum()
chord_symbol_recall(estimations, annotations)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Representation of events
Step4: [Weighted] Chord Symbol Recall
|
7,322
|
<ASSISTANT_TASK:>
Python Code:
import numpy
import matplotlib.pyplot as plot
import seaborn
from sklearn import datasets
from sklearn import svm
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
%matplotlib inline
digits = datasets.load_digits()
print(digits.DESCR)
k_folds = KFold(n_splits=10)
c_values = numpy.logspace(-10, 0, 10)
model = svm.SVC(kernel='linear')
scores = [cross_val_score(model.set_params(C=c_value),
digits.data,
digits.target,
cv=k_folds,
n_jobs=-1) for c_value in c_values]
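# For reference, scikit-learn's GridSearchCV wraps the same search in one call
# (an equivalent sketch of the loop above, not part of the original analysis):
from sklearn.model_selection import GridSearchCV
search = GridSearchCV(svm.SVC(kernel='linear'), {'C': c_values}, cv=k_folds, n_jobs=-1)
search.fit(digits.data, digits.target)
print(search.best_params_, search.best_score_)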
means = numpy.array([numpy.mean(score) for score in scores])
deviations = numpy.array([numpy.std(score) for score in scores])
seaborn.set_style("whitegrid")
figure = plot.figure()
axe = figure.gca()
line = plot.plot(c_values, means, axes=axe)
line = plot.plot(c_values, means + deviations, '--')
line = plot.plot(c_values, means - deviations, '--')
title = axe.set_title("Accuracy vs C")
label = axe.set_ylabel("Accuracy")
labels = axe.set_xlabel("C Value")
axe.set_xscale('log')
best_index = means.argmax()
best_c = c_values[means.argmax()]
best_mean = means[best_index]
best_std = deviations[best_index]
print("Best C-value: {0:.5f}".format(best_c))
print("95% Confidence Interval for Accuracy: ({0:.2f} +/- {1:.2f})".format(best_mean,
best_std))
model = svm.SVC(kernel="linear")
scores = cross_val_score(model, digits.data, digits.target, cv=k_folds, n_jobs=-1)
mean = numpy.mean(scores)
std = numpy.std(scores)
print("95% Confidence Interval for accuracy of default C-value: ({0:.2f} +/- {1:.2f})".format(mean, std))
best_mean - mean
model.C
c_values = numpy.linspace(0, 1)[1:]
scores = [cross_val_score(model.set_params(C=c_value),
digits.data,
digits.target,
cv=k_folds,
n_jobs=-1) for c_value in c_values]
means = numpy.array([numpy.mean(score) for score in scores])
deviations = numpy.array([numpy.std(score) for score in scores])
seaborn.set_style("whitegrid")
figure = plot.figure()
axe = figure.gca()
line = plot.plot(c_values, means, axes=axe)
line = plot.plot(c_values, means + deviations, '--')
line = plot.plot(c_values, means - deviations, '--')
title = axe.set_title("Accuracy vs C")
label = axe.set_ylabel("Accuracy")
labels = axe.set_xlabel("C Value")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The data
Step2: Set up 10 Folds Cross Validation
Step3: Set up the logarithmic C-values
Step4: Set up the linear Support Vector Classifier
Step5: Get cross-validation scores for each C-value
Step6: Plot the mean-scores and standard deviations.
Step7: It looks like the default does as well as the best model that I found by changing the C-values, even though it is set to 1.
|
7,323
|
<ASSISTANT_TASK:>
Python Code:
# First, import PyTorch and the related packages
import torch
import torch.nn as nn
import torch.optim
from torch.autograd import Variable
import numpy as np
import glob
import unicodedata
import string
# all_letters holds all supported printable characters plus punctuation
all_letters = string.ascii_letters + " .,;'-"
# Plus EOS marker
n_letters = len(all_letters) + 1
EOS = n_letters - 1
def unicode_to_ascii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
)
print(unicode_to_ascii("O'Néàl"))
# All visible characters that can appear in the surnames
print('all_letters: ', all_letters)
# Number of characters, +1 for the EOS end marker
print('n_letters: ', n_letters)
# The end marker; it carries no real content
print('EOS: ', EOS)
# Read the names in a file line by line and return them all in a list
def read_lines(filename):
lines = open(filename).read().strip().split('\n')
return [unicode_to_ascii(line) for line in lines]
# category_lines is a dictionary:
# its keys are country names, its values are all the names read from that country's file
category_lines = {}
# all_categories is a list
# containing all of the country names
all_categories = []
# Loop over all the files
for filename in glob.glob('./names/*.txt'):
# Extract the country name from the filename
category = filename.split('/')[-1].split('.')[0]
# Add the country name to the list
all_categories.append(category)
# Read all the names in this country's file
lines = read_lines(filename)
# Store all the names in the dictionary under their country
category_lines[category] = lines
# Total number of categories (countries)
n_categories = len(all_categories)
print('# categories: ', n_categories, all_categories)
print()
print('# Russian names: ', category_lines['Russian'][:10])
# Count how many training examples we have in total
all_line_num = 0
for key in category_lines:
all_line_num += len(category_lines[key])
print(all_line_num)
import random
def random_training_pair():
# Randomly choose a country name
category = random.choice(all_categories)
# Randomly choose one of that country's names
line = random.choice(category_lines[category])
return category, line
print(random_training_pair())
# Convert the country a name belongs to into its category index
def make_category_input(category):
li = all_categories.index(category)
return li
print(make_category_input('Italian'))
def make_chars_input(nameStr):
name_char_list = list(map(lambda x: all_letters.find(x), nameStr))
return name_char_list
def make_target(nameStr):
target_char_list = list(map(lambda x: all_letters.find(x), nameStr[1:]))
target_char_list.append(n_letters - 1)# EOS
return target_char_list
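# A quick check of the encoding helpers: the input encodes each character,
# and the target is the same sequence shifted left by one, with EOS appended.
print(make_chars_input('Abe'))
print(make_target('Abe'))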
def random_training_set():
# Randomly pick a (category, name) pair
category, line = random_training_pair()
#print(category, line)
# Convert them into the corresponding Tensors
category_input = make_category_input(category)
line_input = make_chars_input(line)
#category_name_input = make_category_name_input(category, line)
line_target = make_target(line)
return category_input, line_input, line_target
#return category_name_input, line_target
print(random_training_set())
# A hand-built LSTM model
class LSTMNetwork(nn.Module):
def __init__(self, category_size, name_size, hidden_size, output_size, num_layers = 1):
super(LSTMNetwork, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
# Embedding layers
self.embedding1 = nn.Embedding(category_size, hidden_size)
self.embedding2 = nn.Embedding(name_size, hidden_size)
self.lstm = nn.LSTM(hidden_size*2, hidden_size, num_layers, batch_first = True)
# recurrent connections inside the hidden layer
# self.dropout = nn.Dropout(0.2)
self.fc = nn.Linear(hidden_size, output_size)
# Output layer
self.softmax = nn.LogSoftmax()
def forward(self, category_variable, name_variable, hidden):
# First run the category and character embeddings separately
category = self.embedding1(category_variable)
name = self.embedding2(name_variable)
# Concatenate the embeddings into a (batch=1, seq=1, features) input for the LSTM
input_variable = torch.cat([category, name], 1).view(1, 1, -1)
# Input-to-hidden computation
output, hidden = self.lstm(input_variable, hidden)
# output size: batch_size, len_seq, hidden_size
output = output[:, -1, ...]
# now output has size: batch_size, hidden_size
# Fully connected layer
output = self.fc(output)
# output size: batch_size, output_size
# softmax function
output = self.softmax(output)
return output, hidden
def initHidden(self):
# Initialize the hidden state and the cell state to all zeros.
# Note both have size: num_layers, batch_size, hidden_size
hidden = Variable(torch.zeros(self.num_layers, 1, self.hidden_size))
# The internal cell state, also all zeros
cell = Variable(torch.zeros(self.num_layers, 1, self.hidden_size))
return (hidden, cell)
# Define the training function: pick one training example at random and train on each of its characters
def train_LSTM(lstm):
# Initialize the hidden state; zero the gradients and the loss
hidden = lstm.initHidden()
optimizer.zero_grad()
loss = 0
# Randomly pick one training example
category_input, line_input, line_target = random_training_set()
#print('--- getting random data ---')
#print(category_input, line_input, line_target)
# Prepare the category data
category_variable = Variable(torch.LongTensor([category_input]))
# Loop over the characters
for t in range(len(line_input)):
# the current character of the surname
name_variable = Variable(torch.LongTensor([line_input[t]]))
# the target (next) character
name_target = Variable(torch.LongTensor([line_target[t]]))
# Run the model
output, hidden = lstm(category_variable, name_variable, hidden)
# Accumulate the loss
loss += criterion(output, name_target)
# Compute the average loss per character
l = len(line_input)
loss = 1.0 * loss / l
# Backpropagate and update the weights
loss.backward()
optimizer.step()
return loss
import time
import math
def time_since(t):
now = time.time()
s = now - t
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
HIDDEN_SIZE = 64
num_epoch = 3
learning_rate = 0.002
num_layers = 2
# Instantiate the model
lstm = LSTMNetwork(n_categories, n_letters-1, HIDDEN_SIZE, n_letters, num_layers=num_layers)
# Define the loss function and the optimization method
optimizer = torch.optim.Adam(lstm.parameters(), lr = learning_rate)
criterion = torch.nn.NLLLoss()
start = time.time()
records = []
# Start the training loop
for epoch in range(num_epoch):
train_loss = 0
# Loop once per line of data, sampling randomly
for i in range(all_line_num):
loss = train_LSTM(lstm)
# train_loss += loss
# Every 2000 steps, print the current loss and training progress
if i % 2000 == 0:
training_process = (all_line_num * epoch + i) / (all_line_num * num_epoch) * 100
print('Epoch {}, training loss: {:.2f}, progress: {:.2f}% ({})'\
.format(epoch, loss.data.numpy()[0], training_process, time_since(start)))
records.append([loss.data.numpy()[0]])
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
%matplotlib inline
a = [i[0] for i in records]
plt.plot(a[0::500], label = 'Train Loss')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.legend()
max_length = 20
# Generate one name, given a category (country) name,
# a starting character start_char,
# and a "temperature" controlling the randomness of the sampling
def generate_one(category, start_char='A', temperature=0.2):
# Initialize the input data: the category and the first input character
# the category
category_idx = make_category_input(category)
category_variable = Variable(torch.LongTensor([category_idx]))
# the first character
name_idx = all_letters.index(start_char)
name_variable = Variable(torch.LongTensor([name_idx]))
# Initialize the hidden state
hidden = lstm.initHidden()
output_str = start_char
for i in range(max_length):
# Run the model
output, hidden = lstm(category_variable, name_variable, hidden)
# Turn the output into a multinomial distribution
output_dist = output.data.view(-1).div(temperature).exp()
# so the next character can be sampled according to the temperature:
# a low temperature favours the character the network rates most probable,
# a high temperature samples characters closer to uniformly at random
top_i = torch.multinomial(output_dist, 1)[0]
# If the generated character is EOS, generation ends
if top_i == EOS:
break
else:
# 继续下一个字符
char = all_letters[top_i]
output_str += char
chars_input = all_letters.index(char)
name_variable = Variable(torch.LongTensor([chars_input]))
return output_str
# Define another convenience function to generate several names at once
def generate(category, start_chars='ABC'):
for start_char in start_chars:
print(generate_one(category, start_char))
generate('Russian', 'RUSKCJ')
generate('German', 'GERS')
generate('Spanish', 'SPAJFC')
generate('Chinese', 'CHIFYL')
generate('English', 'ABCKFJSIL')
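# Temperature controls how adventurous the sampling is; a quick comparison
# (illustrative only -- the output changes from run to run):
for temp in [0.1, 0.5, 1.0]:
    print(temp, generate_one('Russian', 'R', temperature=temp))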
print(lstm)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare the data
Step2: You can see that "O'Néàl" has been converted to O'Neal, represented with plain ASCII characters.
Step3: all_letters contains every character that can possibly appear in our dataset, i.e. the "alphabet".
Step4: Now our data is ready and we can build the neural network!
Step5: First handle the category: convert the country name into an index.
Step6: For each training step, that is, for each character of each name in the training data, the network's input is (category, current letter, hidden state) and its output is (next letter, next hidden state).
Step7: Also for convenience during training, we build a random_training_set function that randomly selects a (category, line) pair and converts it into the Tensors training needs: (category, input, target).
Step8: Build the neural network
Step9: Start training!
Step10: We define a time_since function, which prints how long training has been running.
Step11: Below, you need to define the loss function and the optimizer, and instantiate the model parameters.
Step12: The training procedure is the same routine as in our previous lessons!
Step13: Plot and inspect the loss curve
Step14: Because of a division-by-zero error when I compute the average loss, the loss curve has gaps; you are welcome to improve my calculation so the curve becomes continuous.
Step15: You can see how well the LSTM predicts, but it is clearly not ideal yet; I think you can make the model perform better by adjusting the network architecture or tuning the hyperparameters.
|
7,324
|
<ASSISTANT_TASK:>
Python Code:
abbr = 'NLP'
full_text = 'Natural Language Processing'
# Enter your code here:
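# One possible answer (the exact target sentence isn't shown in this excerpt):
print(f'{abbr} stands for {full_text}')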
%%writefile contacts.txt
First_Name Last_Name, Title, Extension, Email
# Write your code here:
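# A minimal sketch: read the whole file into a string called fields,
# letting the with-block close the file for us.
with open('contacts.txt') as f:
    fields = f.read()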
# Run fields to see the contents of contacts.txt:
fields
# Perform import
import PyPDF2
# Open the file as a binary object (the filename is an assumption; it isn't shown in this excerpt)
f = open('Business_Proposal.pdf', 'rb')
# Use PyPDF2 to read the text of the file
pdf_reader = PyPDF2.PdfFileReader(f)
# Get the text from page 2 (CHALLENGE: Do this in one step!)
page_two_text = pdf_reader.getPage(1).extractText()
# Close the file
f.close()
# Print the contents of page_two_text
print(page_two_text)
# Simple Solution:
# CHALLENGE Solution (re-run the %%writefile cell above to obtain an unmodified contacts.txt file):
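# A possible solution, assuming the goal is to append page_two_text to contacts.txt:
with open('contacts.txt', 'a+') as f:
    f.write(page_two_text)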
import re
# Enter your regex pattern here. This may take several tries!
# One plausible pattern, assuming the goal is to extract email addresses from the page:
pattern = r'\w+@\w+\.\w+'
re.findall(pattern, page_two_text)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Files
Step2: 3. Open the file and use .read() to save the contents of the file to a string called fields. Make sure the file is closed at the end.
Step3: Working with PDF Files
Step4: 5. Open the file contacts.txt in append mode. Add the text of page 2 from above to contacts.txt.
Step5: Regular Expressions
|
7,325
|
<ASSISTANT_TASK:>
Python Code:
t = 'Python'
t[0:2]
t[::2]
t[::-1]
import numpy as np
arr = np.array([[3, 6, 2, 1, 7],
[4, 1, 3, 2, 8],
[7, 9, 2, 1, 8],
[8, 6, 9, 6, 7],
[9, 1, 9, 2, 6],
[9, 8, 1, 5, 6],
[0, 4, 2, 0, 6],
[0, 3, 1, 4, 7]])
arr[:, 0]
arr[2:6, 1:4]
arr = np.array([[[3, 6, 2, 1, 7],
[4, 1, 3, 2, 8],
[7, 9, 2, 1, 8],
[8, 6, 9, 6, 7],
[9, 1, 9, 2, 6],
[9, 8, 1, 5, 6],
[0, 4, 2, 0, 6],
[0, 3, 1, 4, 7]],
[[5, 5, 3, 9, 3],
[8, 3, 5, 1, 1],
[3, 4, 3, 0, 9],
[1, 4, 1, 0, 2],
[7, 1, 2, 0, 1],
[5, 1, 3, 7, 8],
[8, 0, 9, 6, 0],
[7, 7, 4, 4, 4]],
[[1, 0, 8, 9, 1],
[7, 4, 8, 8, 2],
[9, 1, 8, 3, 6],
[5, 6, 2, 0, 1],
[7, 4, 2, 5, 7],
[9, 5, 6, 8, 6],
[7, 4, 4, 7, 1],
[8, 4, 4, 9, 1]]])
arr.shape
arr[0:2, 0:4, -1]
arr[:, 0:2, 0:3]
arr[:, 0, 2]
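# The same slicing rules extend to any number of dimensions; for example a 4-D
# array (e.g. time, depth, lat, lon) sliced down to one time step and one depth:
arr4d = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)
print(arr4d[0, 1, :, :].shape)  # -> (4, 5)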
from netCDF4 import Dataset
nc = Dataset('./data/mdt_cnes_cls2009_global_v1.1.nc')
nc
u = nc.variables['Grid_0002']
u
v = nc.variables['Grid_0003']
v
u, v = u[:], v[:]
lon = nc.variables['NbLongitudes'][:]
lat = nc.variables['NbLatitudes'][:]
import numpy as np
lon, lat = np.meshgrid(lon, lat)
lon.shape, lat.shape, u.shape, v.shape
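# meshgrid expands the two 1-D coordinate vectors into 2-D grids so that every
# (lat, lon) pair lines up with a velocity sample; a tiny illustration:
gx, gy = np.meshgrid(np.array([0., 1.]), np.array([10., 20., 30.]))
print(gx.shape)  # -> (3, 2): rows follow the second argument, columns the first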
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
sub = 5
ax.quiver(lon[::sub, ::sub], lat[::sub, ::sub], u.T[::sub, ::sub], v.T[::sub, ::sub])
from oceans import wrap_lon180
lon = wrap_lon180(lon)
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from cartopy.io import shapereader
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
def make_map(projection=ccrs.PlateCarree()):
fig, ax = plt.subplots(figsize=(9, 13),
subplot_kw=dict(projection=projection))
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
return fig, ax
mask_x = np.logical_and(lon > -40, lon < -36)
mask_y = np.logical_and(lat > -15, lat < -12)
mask = np.logical_and(mask_x, mask_y)
import cartopy.feature as cfeature
land_10m = cfeature.NaturalEarthFeature('physical', 'land', '10m',
edgecolor='face',
facecolor=cfeature.COLORS['land'])
fig, ax = make_map()
ax.quiver(lon[mask], lat[mask], u.T[mask], v.T[mask])
ax.add_feature(land_10m)
ax.coastlines('10m')
ax.set_extent([-40, -36, -15, -12])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <img src='./files/2dbase2.png', width="300">
Step2: <img src='./files/2dbase1.png', width="300">
Step3: <img src='./files/3darray.png' width="300">
Step4: <img src='./files/3dbase2.png', width="300">
Step5: <img src='./files/3dbase5.png', width="300">
Step6: More than 3 dimensions
|
7,326
|
<ASSISTANT_TASK:>
Python Code:
# This is the only comment here that makes sense
def find_index(m,a):
try:
return a.index(m)
except ValueError:
return -1
def find_two_sum(a, s):
'''
>>> (3, 9) == find_two_sum([1, 3, 5, 7, 9], 12)
True
'''
if len(a)<2:
return (-1,-1)
idx = dict( (v,i) for i,v in enumerate(a) )
for i in a:
m = s - i
k = idx.get(m,-1)
if k != -1:
return (i, m)
return (-1, -1)
print(find_two_sum([1, 3, 5, 7, 9], 12))
if __name__ == '__main__':
import doctest; doctest.testmod()
%%javascript
IPython.keyboard_manager.command_shortcuts.add_shortcut('g', {
handler : function (event) {
var input = IPython.notebook.get_selected_cell().get_text();
var cmd = "f = open('.toto.py', 'w');f.close()";
if (input != "") {
cmd = '%%writefile .toto.py\n' + input;
}
IPython.notebook.kernel.execute(cmd);
//cmd = "import os;os.system('open -a /Applications/MacVim.app .toto.py')";
//cmd = "!open -a /Applications/MacVim.app .toto.py";
cmd = "!code .toto.py";
IPython.notebook.kernel.execute(cmd);
return false;
}}
);
IPython.keyboard_manager.command_shortcuts.add_shortcut('u', {
handler : function (event) {
function handle_output(msg) {
var ret = msg.content.text;
IPython.notebook.get_selected_cell().set_text(ret);
}
var callback = {'output': handle_output};
var cmd = "f = open('.toto.py', 'r');print(f.read())";
IPython.notebook.kernel.execute(cmd, {iopub: callback}, {silent: false});
return false;
}}
);
# v=getattr(a, 'pop')(1)
s='print 4 7 '
commands={
'print':print,
'len':len
}
def exec_string(s):
global commands
chunks=s.split()
func_name=chunks[0] if len(chunks) else 'blbl'
func=commands.get(func_name,None)
params=[int(x) for x in chunks[1:]]
if func:
func(*params)
exec_string(s)
M = int(input())
m = set(map(int, input().split()))
N = int(input())
n = set(map(int, input().split()))
m ^ n
S='add 5 6'
method, *args = S.split()
print(method)
print(*map(int,args))
method, list(map(int, args))
# methods
# (*map(int,args))
# command='add'.split()
# method, args = command[0], list(map(int,command[1:]))
# method, args
for _ in range(2):
met, *args = input().split()
print(met, args)
try:
pass
# methods[met](*list(map(int,args)))
except:
pass
class Stack:
def __init__(self):
self.data = []
def is_empty(self):
return self.data == []
def size(self):
return len(self.data)
def push(self, val):
self.data.append(val)
def clear(self):
self.data.clear()
def pop(self):
return self.data.pop()
def __repr__(self):
return "Stack("+str(self.data)+")"
def sum_list(ls):
if len(ls)==0:
return 0
elif len(ls)==1:
return ls[0]
else:
return ls[0] + sum_list(ls[1:])
def max_list(ls):
print(ls)
if len(ls)==0:
return None
elif len(ls)==1:
return ls[0]
else:
m = max_list(ls[1:])
return ls[0] if ls[0]>m else m
def reverse_list(ls):
if len(ls)<2:
return ls
return reverse_list(ls[1:])+ls[0:1]
def is_ana(s=''):
if len(s)<2:
return True
return s[0]==s[-1] and is_ana(s[1:len(s)-1])
print(is_ana("abc"))
import turtle
myTurtle = turtle.Turtle()
myWin = turtle.Screen()
def drawSpiral(myTurtle, lineLen):
if lineLen > 0:
myTurtle.forward(lineLen)
myTurtle.right(90)
drawSpiral(myTurtle,lineLen-5)
drawSpiral(myTurtle,100)
# myWin.exitonclick()
myTurtle.forward(100)
from itertools import combinations_with_replacement
list(combinations_with_replacement([1,1,3,3,3],2))
hash((1,2))
# 4
# a a c d
# 2
from itertools import combinations
# N=int(input())
# s=input().split()
# k=int(input())
s='a a c d'.split()
k=2
combs=list(combinations(s,k))
print('{:.4f}'.format(len([x for x in combs if 'a' in x])/len(combs)))
# ------------------------------------------
import random
num_trials=10000
num_found=0
for i in range(num_trials):
if 'a' in random.sample(s,k):
num_found+=1
print('{:.4f}'.format(num_found/num_trials))
dir(5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: https
Step2: Symmetric Difference
|
7,327
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TensorFlow is an open source machine learning library
# Note: The following line is temporary to use v2
!pip install tensorflow==2.0.0-beta0
import tensorflow as tf
# Numpy is a math library
import numpy as np
# Matplotlib is a graphing library
import matplotlib.pyplot as plt
# math is Python's math library
import math
# We'll generate this many sample datapoints
SAMPLES = 1000
# Set a "seed" value, so we get the same random numbers each time we run this
# notebook
np.random.seed(1337)
# Generate a uniformly distributed set of random numbers in the range from
# 0 to 2π, which covers a complete sine wave oscillation
x_values = np.random.uniform(low=0, high=2*math.pi, size=SAMPLES)
# Shuffle the values to guarantee they're not in order
np.random.shuffle(x_values)
# Calculate the corresponding sine values
y_values = np.sin(x_values)
# Plot our data. The 'b.' argument tells the library to print blue dots.
plt.plot(x_values, y_values, 'b.')
plt.show()
# Add a small random number to each y value
y_values += 0.1 * np.random.randn(*y_values.shape)
# Plot our data
plt.plot(x_values, y_values, 'b.')
plt.show()
# We'll use 60% of our data for training and 20% for testing. The remaining 20%
# will be used for validation. Calculate the indices of each section.
TRAIN_SPLIT = int(0.6 * SAMPLES)
TEST_SPLIT = int(0.2 * SAMPLES + TRAIN_SPLIT)
# Use np.split to chop our data into three parts.
# The second argument to np.split is an array of indices where the data will be
# split. We provide two indices, so the data will be divided into three chunks.
x_train, x_test, x_validate = np.split(x_values, [TRAIN_SPLIT, TEST_SPLIT])
y_train, y_test, y_validate = np.split(y_values, [TRAIN_SPLIT, TEST_SPLIT])
# Double check that our splits add up correctly
assert (x_train.size + x_validate.size + x_test.size) == SAMPLES
# Plot the data in each partition in different colors:
plt.plot(x_train, y_train, 'b.', label="Train")
plt.plot(x_test, y_test, 'r.', label="Test")
plt.plot(x_validate, y_validate, 'y.', label="Validate")
plt.legend()
plt.show()
# We'll use Keras to create a simple model architecture
from tensorflow.keras import layers
model_1 = tf.keras.Sequential()
# First layer takes a scalar input and feeds it through 16 "neurons". The
# neurons decide whether to activate based on the 'relu' activation function.
model_1.add(layers.Dense(16, activation='relu', input_shape=(1,)))
# Final layer is a single neuron, since we want to output a single value
model_1.add(layers.Dense(1))
# Compile the model using a standard optimizer and loss function for regression
model_1.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
# Train the model on our training data while validating on our validation set
history_1 = model_1.fit(x_train, y_train, epochs=1000, batch_size=16,
validation_data=(x_validate, y_validate))
# Draw a graph of the loss, which is the distance between
# the predicted and actual values during training and validation.
loss = history_1.history['loss']
val_loss = history_1.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'g.', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Exclude the first few epochs so the graph is easier to read
SKIP = 50
plt.plot(epochs[SKIP:], loss[SKIP:], 'g.', label='Training loss')
plt.plot(epochs[SKIP:], val_loss[SKIP:], 'b.', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf()
# Draw a graph of mean absolute error, which is another way of
# measuring the amount of error in the prediction.
mae = history_1.history['mae']
val_mae = history_1.history['val_mae']
plt.plot(epochs[SKIP:], mae[SKIP:], 'g.', label='Training MAE')
plt.plot(epochs[SKIP:], val_mae[SKIP:], 'b.', label='Validation MAE')
plt.title('Training and validation mean absolute error')
plt.xlabel('Epochs')
plt.ylabel('MAE')
plt.legend()
plt.show()
# Use the model to make predictions from our validation data
predictions = model_1.predict(x_train)
# Plot the predictions against the test data
plt.clf()
plt.title('Training data predicted vs actual values')
plt.plot(x_test, y_test, 'b.', label='Actual')
plt.plot(x_train, predictions, 'r.', label='Predicted')
plt.legend()
plt.show()
model_2 = tf.keras.Sequential()
# First layer takes a scalar input and feeds it through 16 "neurons". The
# neurons decide whether to activate based on the 'relu' activation function.
model_2.add(layers.Dense(16, activation='relu', input_shape=(1,)))
# The new second layer may help the network learn more complex representations
model_2.add(layers.Dense(16, activation='relu'))
# Final layer is a single neuron, since we want to output a single value
model_2.add(layers.Dense(1))
# Compile the model using a standard optimizer and loss function for regression
model_2.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
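# A quick look at the architecture and parameter counts before training:
model_2.summary()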
history_2 = model_2.fit(x_train, y_train, epochs=600, batch_size=16,
validation_data=(x_validate, y_validate))
# Draw a graph of the loss, which is the distance between
# the predicted and actual values during training and validation.
loss = history_2.history['loss']
val_loss = history_2.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'g.', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Exclude the first few epochs so the graph is easier to read
SKIP = 100
plt.clf()
plt.plot(epochs[SKIP:], loss[SKIP:], 'g.', label='Training loss')
plt.plot(epochs[SKIP:], val_loss[SKIP:], 'b.', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf()
# Draw a graph of mean absolute error, which is another way of
# measuring the amount of error in the prediction.
mae = history_2.history['mae']
val_mae = history_2.history['val_mae']
plt.plot(epochs[SKIP:], mae[SKIP:], 'g.', label='Training MAE')
plt.plot(epochs[SKIP:], val_mae[SKIP:], 'b.', label='Validation MAE')
plt.title('Training and validation mean absolute error')
plt.xlabel('Epochs')
plt.ylabel('MAE')
plt.legend()
plt.show()
# Calculate and print the loss on our test dataset
loss = model_2.evaluate(x_test, y_test)
# Make predictions based on our test dataset
predictions = model_2.predict(x_test)
# Graph the predictions against the actual values
plt.clf()
plt.title('Comparison of predictions and actual values')
plt.plot(x_test, y_test, 'b.', label='Actual')
plt.plot(x_test, predictions, 'r.', label='Predicted')
plt.legend()
plt.show()
# Convert the model to the TensorFlow Lite format without quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model_2)
tflite_model = converter.convert()
# Save the model to disk
open("sine_model.tflite", "wb").write(tflite_model)
# Convert the model to the TensorFlow Lite format with quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model_2)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_model = converter.convert()
# Save the model to disk
open("sine_model_quantized.tflite", "wb").write(tflite_model)
# Instantiate an interpreter for each model
sine_model = tf.lite.Interpreter('sine_model.tflite')
sine_model_quantized = tf.lite.Interpreter('sine_model_quantized.tflite')
# Allocate memory for each model
sine_model.allocate_tensors()
sine_model_quantized.allocate_tensors()
# Get the input and output tensors so we can feed in values and get the results
sine_model_input = sine_model.tensor(sine_model.get_input_details()[0]["index"])
sine_model_output = sine_model.tensor(sine_model.get_output_details()[0]["index"])
sine_model_quantized_input = sine_model_quantized.tensor(sine_model_quantized.get_input_details()[0]["index"])
sine_model_quantized_output = sine_model_quantized.tensor(sine_model_quantized.get_output_details()[0]["index"])
# Create arrays to store the results
sine_model_predictions = np.empty(x_test.size)
sine_model_quantized_predictions = np.empty(x_test.size)
# Run each model's interpreter for each value and store the results in arrays
for i in range(x_test.size):
sine_model_input().fill(x_test[i])
sine_model.invoke()
sine_model_predictions[i] = sine_model_output()[0]
sine_model_quantized_input().fill(x_test[i])
sine_model_quantized.invoke()
sine_model_quantized_predictions[i] = sine_model_quantized_output()[0]
# See how they line up with the data
plt.clf()
plt.title('Comparison of various models against actual values')
plt.plot(x_test, y_test, 'bo', label='Actual')
plt.plot(x_test, predictions, 'ro', label='Original predictions')
plt.plot(x_test, sine_model_predictions, 'bx', label='Lite predictions')
plt.plot(x_test, sine_model_quantized_predictions, 'gx', label='Lite quantized predictions')
plt.legend()
plt.show()
import os
basic_model_size = os.path.getsize("sine_model.tflite")
print("Basic model is %d bytes" % basic_model_size)
quantized_model_size = os.path.getsize("sine_model_quantized.tflite")
print("Quantized model is %d bytes" % quantized_model_size)
difference = basic_model_size - quantized_model_size
print("Difference is %d bytes" % difference)
# Install xxd if it is not available
!apt-get -qq install xxd
# Save the file as a C source file
!xxd -i sine_model_quantized.tflite > sine_model_quantized.cc
# Print the source file
!cat sine_model_quantized.cc
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create and convert a TensorFlow model
Step2: Generate data
Step3: Add some noise
Step4: Split our data
Step5: Design a model
Step6: Train the model
Step7: Check the training metrics
Step8: Look closer at the data
Step9: Further metrics
Step10: This graph of mean absolute error tells another story. We can see that training data shows consistently lower error than validation data, which means that the network may have overfit, or learned the training data so rigidly that it can't make effective predictions about new data.
Step11: Oh dear! The graph makes it clear that our network has learned to approximate the sine function in a very limited way. For 0 <= x <= 1.1, the line mostly fits, but for the rest of our x values it is a rough approximation at best.
Step12: We'll now train the new model. To save time, we'll train for only 600 epochs
Step13: Evaluate our new model
Step14: Great results! From these graphs, we can see several exciting things
Step15: Much better! The evaluation metrics we printed show that the model has a low loss and MAE on the test data, and the predictions line up visually with our data fairly well.
Step16: Test the converted models
Step17: We can see from the graph that the predictions for the original model, the converted model, and the quantized model are all close enough to be indistinguishable. This means that our quantized model is ready to use!
Step18: Our quantized model is only 16 bytes smaller than the original version, which is only a tiny reduction in size! At around 2.6 kilobytes, this model is already so small that the weights make up only a small fraction of the overall size, meaning quantization has little effect.
|
7,328
|
<ASSISTANT_TASK:>
Python Code:
from gensim.models import Word2Vec
from sklearn.manifold import TSNE
from nltk.corpus import genesis
import matplotlib.pyplot as plt
from textblob import TextBlob
from pprint import pprint
import pandas as pd
import numpy as np
import logging
import csv
import re
%matplotlib inline
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
sentences = genesis.sents(fileids='english-kjv.txt')
print(len(sentences))
pprint(sentences[0])
w2v = Word2Vec(sentences, min_count=1, workers=2)
labels = w2v.index2word
vectors = w2v.syn0
tisney = TSNE(n_components=2, random_state=42)
vectors2d = tisney.fit_transform(vectors)
print('number of unique words = {}'.format(vectors.shape[0]))
print('number of dimensions = {}'.format(vectors.shape[1]))
print('man | king ==> semantic similarity: {}'.format(w2v.similarity('man','king')))
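# gensim also exposes nearest-neighbour queries; for example (illustrative --
# the neighbours depend on this small corpus, assuming 'king' is in its vocabulary):
print(w2v.most_similar('king', topn=5))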
plt.figure(figsize=(15, 15))
for i, label in enumerate(labels):
if i > 1500:
break
x, y = vectors2d[i,:]
plt.scatter(x, y)
plt.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom',color='b')
plt.grid(True)
plt.show()
w2v = Word2Vec.load_word2vec_format(fname='models/GoogleNews-vectors-negative300.bin.gz', binary=True)
labels = w2v.index2word
vectors = w2v.syn0
tisney = TSNE(n_components=2, random_state=42)
vectors2d = tisney.fit_transform(vectors)
print('man | king ==> semantic similarity:{}'.format(w2v.similarity('man','king')))
print('man | queen ==> semantic similarity:{}'.format(w2v.similarity('man','queen')))
print('man | woman ==> semantic similarity:{}'.format(w2v.similarity('man','woman')))
print('woman | king ==> semantic similarity:{}'.format(w2v.similarity('woman','king')))
print('influenza | virus ==> semantic similarity:{}'.format(w2v.similarity('influenza','virus')))
plt.figure(figsize=(15, 15))
for i, label in enumerate(labels):
if i > 1500:
break
x, y = vectors2d[i,:]
plt.scatter(x, y)
plt.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom',color='b')
plt.grid(True)
plt.show()
df = pd.read_csv('data/161207_ZikaLabels.csv')
sentences = []
for i in range(1000):
sent = df.diagnosisRAW[i].decode('ISO-8859-2').encode('ASCII','ignore').encode('UTF8').replace(',','').lower().split()
sentences.append(sent)
w2v = Word2Vec(sentences, min_count=1, workers=2)
labels = w2v.index2word
vectors = w2v.syn0
tisney = TSNE(n_components=2, random_state=42)
vectors2d = tisney.fit_transform(vectors)
plt.figure(figsize=(15, 15))
for i, label in enumerate(labels):
if i > 1000:
break
x, y = vectors2d[i,:]
plt.scatter(x, y)
if label in ['zica','zika','zoka']:
plt.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom', color='r')
else:
plt.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom', color='k')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: BIBLE (Genesis)
Step2: GOOGLE NEWS
Step3: GYANT
|
7,329
|
<ASSISTANT_TASK:>
Python Code:
import os
import desc.monitor
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from lsst.sims.photUtils import calcNeff
%matplotlib inline
%load_ext autoreload
%autoreload 2
star_db_name = '../../twinkles_run1.1.db'
truth_dbConn = desc.monitor.TruthDBInterface(database=star_db_name, driver='sqlite')
twinkles_dbConn = desc.monitor.DBInterface(database='DESC_Twinkles_Level_2',
#host='127.0.0.1', port='3307', ##if not running jupyter-dev
host='scidb1.nersc.gov', port=3306,
driver='mysql', project='Twinkles Run1.1')
opsim_dbConn = desc.monitor.OpsimDBInterface('../../kraken_1042_sqlite.db') ##Run 1.1 OpSim database
#opsim_dbConn = desc.monitor.OpsimDBInterface('../../minion_1016_sqlite.db')
dm_visit_info = twinkles_dbConn.get_all_visit_info()
opsim_info = opsim_dbConn.get_summary_depth_info_for_visits(1427) #1427 is the Twinkles field ID
#Find entries with the proper obsHistIds in the Opsim data to match the Twinkles run
obs_list = []
for visit_id in dm_visit_info['visit_id']:
obs_list.append(np.where(opsim_info['obsHistID'] == visit_id)[0][0])
opsim_info = opsim_info[obs_list]
worker = desc.monitor.Monitor(twinkles_dbConn, truth_dbConn=truth_dbConn)
depth_curve = worker.measure_depth_curve()
seeing_curve = worker.measure_seeing_curve()
fig = plt.figure()
bins = 15
n,bins,p = plt.hist(depth_curve.lightcurve['mag'], histtype='step', lw=4, bins=15, label='From CcdVisit', range=(21.5,26.5))
plt.hist(opsim_info['fiveSigmaDepth'], histtype='step', bins=bins, lw=4, label='Opsim Values')
plt.legend()
plt.xlabel('5 sigma depth (mags)')
plt.ylabel('Number of Visits')
plt.title('Twinkles 1.1 5-sigma Depth')
#plt.ylim(0, 6500)
fig = plt.figure(figsize=(18, 12))
fig_num = 1
for filter_val in ['u', 'g', 'r', 'i', 'z', 'y']:
fig.add_subplot(2,3,fig_num)
n,bins,p = plt.hist(depth_curve.lightcurve['mag'][depth_curve.lightcurve['bandpass'] == str('lsst'+filter_val)],
histtype='step', lw=4, bins=15, label='CcdVisit', range=(21.5,26.5))
plt.hist(opsim_info['fiveSigmaDepth'][opsim_info['filter'] == filter_val], histtype='step', bins=bins, lw=4, label='Opsim Values')
if fig_num == 1:
plt.legend()
plt.xlabel('5 sigma depth (mags)')
plt.ylabel('Number of Visits')
#if fig_num == 1:
# plt.ylim(0, 2800)
plt.title(filter_val)
fig_num += 1
plt.suptitle('Twinkles 1.1 5-sigma Depth by filter')
plt.subplots_adjust(top=0.93)
plt.hist(opsim_info['FWHMeff'] - seeing_curve.seeing_curve['seeing'], range=(-0.2, 0.4), bins=20)
plt.title(r'Opsim $FWHM_{\bf{eff}}$ - DM seeing')
plt.xlabel(r'Opsim $FWHM_{\bf{eff}}$ - DM seeing (arcsec)')
plt.ylabel('# of visits')
fig = plt.figure(figsize=(8,6))
plt.scatter(opsim_info['FWHMeff'], opsim_info['FWHMeff'] - seeing_curve.seeing_curve['seeing'])
l1, = plt.plot(np.arange(0, 1.8, 0.01), np.zeros(len(np.arange(0, 1.8, 0.01))), c='r', label='DM seeing = Opsim seeing')
plt.xlim(0, 1.8)
#plt.ylim(0, 1.8)
plt.xlabel(r'Opsim $FWHM_{\bf{eff}}$ (arcsec)')
plt.ylabel(r'Opsim $FWHM_{\bf{eff}}$ - DM seeing (arcsec)')
plt.legend([l1], ['PhoSim+DM seeing = Opsim seeing'], loc=2)
plt.title('Twinkles 1.1 Seeing Comparison')
distances = worker.match_catalogs(return_distance=True, within_radius=1./3600.)
worker.calc_flux_residuals(depth_curve, seeing_curve)
fig = plt.figure(figsize=(18,12))
i=1
for band in ['u', 'g', 'r', 'i', 'z', 'y']:
fig.add_subplot(2,3,i)
worker.plot_bias_map(with_bins=12, in_band=band, use_existing_fig=fig)
i+=1
plt.tight_layout()
fig = plt.figure(figsize=(18,12))
i=1
for band in ['u', 'g', 'r', 'i', 'z', 'y']:
fig.add_subplot(2,3,i)
worker.plot_bias_map(with_bins=12, in_band=band, use_existing_fig=fig,
normalize=True)
i+=1
plt.tight_layout()
fig = plt.figure(figsize=(18,12))
i=1
for band in ['u', 'g', 'r', 'i', 'z', 'y']:
fig.add_subplot(2,3,i)
worker.plot_bias_scatter(in_band=band, use_existing_fig=fig)
i+=1
plt.gca().set_axis_bgcolor('k')
plt.tight_layout()
fig = plt.figure(figsize=(18,12))
i=1
for band in ['u', 'g', 'r', 'i', 'z', 'y']:
fig.add_subplot(2,3,i)
worker.plot_bias_scatter(in_band=band, use_existing_fig=fig, normalize=True)
i+=1
plt.gca().set_axis_bgcolor('k')
plt.tight_layout()
fig = plt.figure(figsize=(18,12))
i=1
for band in ['u', 'g', 'r', 'i', 'z', 'y']:
fig.add_subplot(2,3,i)
worker.plot_variance_map(with_bins=12, in_band=band, use_existing_fig=fig)
i+=1
plt.tight_layout()
fig = plt.figure(figsize=(18,12))
i=1
for band in ['u', 'g', 'r', 'i', 'z', 'y']:
fig.add_subplot(2,3,i)
worker.plot_variance_scatter(in_band=band, use_existing_fig=fig)
i+=1
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load necessary database connections.
Step2: Then we'll establish a database connection to the NERSC MySQL database for the observed data from Twinkles.
Step3: And finally we'll establish the connection the Opsim database.
Step4: Load the visit info from DM processed PhoSim images and from Opsim
Step5: Use Monitor to assemble depth and seeing curves
Step6: Comparing 5-sigma depth
Step7: And here we plot by filter.
Step8: It looks like there are discrepancies between the values measured using the PhoSim images and the Opsim values. We need to look into this. It looks like the DM calculated values in the PhoSim images are consistently deeper than the Opsim depth. Also, the effect looks the worst in the u and y filters. This indicates that there are differences that need to be explained between the PhoSim sky and the OpSim sky.
Step9: There are also discrepencies here as well. It seems like the PhoSim+DM values are consistently under the Opsim values.
Step10: Plotting Bias and Sigma
|
7,330
|
<ASSISTANT_TASK:>
Python Code:
# The line below can be ignored but I didn't set up my environment properly
import sys ; sys.path.append('/home/mjuenemann/.virtualenvs/ciscoconfparse/lib/python3.6/site-packages')
import ciscoconfparse
CONFIG = """
!
hostname router01
!
tacacs-server host 192.0.2.34
tacacs-server key cheezit
!
interface Ethernet2/0
description Unprotected interface, facing towards Internet
ip address 192.0.2.14 255.255.255.240
no ip unreachables
ntp disable
no mop enable
mtu 900
!
interface Ethernet2/1
description Protected interface, facing towards DMZ
ip address 192.0.2.17 255.255.255.240
 no mop enable
"""
config = ciscoconfparse.CiscoConfParse(CONFIG.split('\n'))
config
tacacs_lines = config.find_objects(r'^tacacs')
tacacs_lines
first_tacacs_line = tacacs_lines[0]
type(first_tacacs_line)
first_tacacs_line.linenum
first_tacacs_line.indent
first_tacacs_line.text
interfaces_with_ntp_disabled = config.find_objects_w_child(r'^interface', r'ntp disable')
interfaces_with_ntp_disabled
interfaces_with_ntp_disabled[0].text
interfaces_with_ntp_disabled[0].ioscfg
interfaces_with_ntp_not_disabled = config.find_objects_wo_child(r'^interface', r'ntp disable')
interfaces_with_ntp_not_disabled
interfaces_with_ntp_not_disabled[0].ioscfg
results = config.find_objects_w_all_children(r'interface', [r'no ip unreachables', r'no mop enable'])
results
results[0].ioscfg
results = config.find_objects_w_parents(r'^interface', 'no mop enable')
results
for result in results:
result.delete()
# Call .commit() after changing the configuration
config.commit()
config.ioscfg
config.append_line('ntp server 192.168.1.1')
# Call .commit() before searching again!!!
config.commit()
config.ioscfg
# Delete all existing MTU lines.
for interface in config.find_objects(r'^interface.+Ethernet'):
interface.delete_children_matching('mtu \d+')
config.commit()
# Add the correct MTU. Note the use of the correct indentation value for children.
for interface in config.find_objects(r'^interface.+Ethernet'):
interface.append_to_family('mtu 1500', indent=interface.child_indent)
config.commit()
config.ioscfg
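# Finally (hedged aside, not in the original): the modified configuration can be
# written out to disk; CiscoConfParse provides save_as() for this, as far as I know.
config.save_as('router01_updated.conf')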
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: I am going to use a very stripped down version of the Secure IOS Template by Team Cymru. This is not a fully functional IOS configuration!
Step3: First, he configuration must be parsed by creating an instance of ciscoconfparse.CiscoConfParse(). The class expects either a file object or a list of configuration lines.
Step4: When CiscoConfParse() reads a configuration, it stores parent-child relationships as a special IOSCfgLine object. IOSCfgLine instances are returned when one queries the parsed configuration.
Step5: IOSCfgLine instances have several useful attributes.
Step6: Finding sections with children
Step7: The .text attribute only returns the matching line whereas the .ioscfg attributes includes all children lines as a list.
Step8: Finding sections without children
Step9: Finding sections with all children
Step10: Finding lines with parents
Step11: Deleting lines
Step12: The no mop enable lines are now missing.
Step13: Adding lines
Step14: Adding lines to sections
|
7,331
|
<ASSISTANT_TASK:>
Python Code:
# Imports for this Python3 notebook
import numpy
import matplotlib.pyplot as plt
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
from rios import rat
from rios import ratapplier
from tpot import TPOTRegressor
# Read Biomass library data from the csv file
fieldBiomass=numpy.loadtxt("biolib_sitelist_auscover.csv",delimiter=',', skiprows=1)
# Open Height Map dataset
keaFile = "alpsbk_aust_y2009_sc5a2.kea"
heightDataset = gdal.Open(keaFile, gdal.GA_Update)
# Set up the reprojection transform from WGS84 (biomass library) to Australian Albers (height data)
source = osr.SpatialReference()
source.ImportFromEPSG(4326)
target = osr.SpatialReference()
target.ImportFromEPSG(3577)
transform = osr.CoordinateTransformation(source, target)
# Open the raster band with the segment IDs
heightBand=heightDataset.GetRasterBand(1)
# Get the Albers to pixel transform
geoTransform=heightDataset.GetGeoTransform()
# Find the segmentID for all the field sites
print("Linking field observations to segment IDs\n")
segmentIDs = []
for record in fieldBiomass:
# Make up a site OGR point
site = ogr.Geometry(ogr.wkbPoint)
site.AddPoint(record[0], record[1])
# Transform the site to EPSG3577
site.Transform(transform)
# Get the pixel location of the site
mx,my=site.GetX(), site.GetY() #coord in map units
#Convert from map to pixel coordinates.
#Only works for geotransforms with no rotation.
px = int((mx - geoTransform[0]) / geoTransform[1]) #x pixel
py = int((my - geoTransform[3]) / geoTransform[5]) #y pixel
# Extract the segmentID for the location
segmentIDs.append(heightBand.ReadAsArray(px,py,1,1)[0][0])
# Get the RAT column names
colNames = rat.getColumnNames(heightDataset)
# Select the columns used for the training/prediction
trainingColumns = [5,6,7,8,9,10,15,19,22,23,25,26,27,28,29,30,31,32,33,34]
trainingNames = [colNames[i] for i in trainingColumns]
# Now we have the segmentIDs, pull the image data from the RAT that corresponds to the segment IDs
imageData = []
# Iterate for all the RAT columns
for name in trainingNames:
print("Extracting sites from " + name)
# Extract the array of values corresponding to the field site segments
imageData.append(rat.readColumnFromBand(heightBand,name).astype('float')[segmentIDs])
# Convert the list of arrays to an array
imageData = numpy.transpose(numpy.array(imageData))
# Remove nodata from the couple of segments too small to get statistics in the image data
goodDataIDX = imageData.min(axis=1)>0
imageData = imageData[goodDataIDX]
fieldBiomass = fieldBiomass[goodDataIDX]
print("\nTraining data has %d observations and %d columns" % imageData.shape)
# Total amount of time we allow for the training
optTime = 600
# Number of CPUs to use for training and cross validation
nCPUs = 4
# What function to minimise
scoring = 'mean_squared_error'
# Number of subsamples from the Biomass Library for model training
nSubsets = 9999
# We select the tb_drymass_ha column to train on
totalBiomass = fieldBiomass[:,10]
# This is the standard error of the site level estimates
totalBiomassSE = fieldBiomass[:,11]
# Select a subsample to improve the model search speed
subSample = numpy.random.choice(len(totalBiomass),nSubsets,replace=False)
biomass=totalBiomass[subSample]
biomassSE=totalBiomassSE[subSample]
trainData=imageData[subSample]
# Use the proportion of the error in the estimates as fitting weights
biomassWeights=biomass/biomassSE
# Setup the TPOT regression options
tpot = TPOTRegressor(max_time_mins=optTime,
n_jobs = nCPUs,
scoring=scoring,
verbosity=2,
cv=10,
max_eval_time_mins=1,
population_size=100)
# Start testing models using 10 fold cross validation and 100 models per generation
tpot.fit(trainData, biomass, sample_weight=biomassWeights)
# Export the best model to a file
tpot.export('tpot_biomass_pipeline.py')
# Build the biomass predictive model
biomassModel = tpot._fitted_pipeline.fit(imageData, totalBiomass)
# Predict the full dataset
predBiomass = biomassModel.predict(imageData)
# Print some RMSE Statistics for various ranges
print("\nTotal RMSE = %f\n" % numpy.sqrt(numpy.mean((totalBiomass-predBiomass)**2)))
stopPoints=[0,100,500,1000,2000,5000,10000]
print("Start"," Stop","Count"," RMSE")
for i in range(len(stopPoints)-1):
idx=numpy.logical_and(totalBiomass>stopPoints[i],totalBiomass<stopPoints[i+1])
rmse=numpy.sqrt(numpy.mean((totalBiomass[idx]-predBiomass[idx])**2))
print('{0:5d} {1:5d} {2:5d} {3:5.0f}'.format(stopPoints[i],stopPoints[i+1],idx.sum(),rmse))
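# Two extra summary numbers (hedged addition, plain numpy): the mean bias and a
# coefficient of determination for the full-sample fit reported above.
bias = numpy.mean(predBiomass - totalBiomass)
ss_res = numpy.sum((totalBiomass - predBiomass)**2)
ss_tot = numpy.sum((totalBiomass - numpy.mean(totalBiomass))**2)
print("Bias = %.2f Mg/ha, R^2 = %.3f" % (bias, 1.0 - ss_res/ss_tot))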
# Plot the Output in a LogLog figure
fig = plt.figure(figsize=(10,10))
plt.loglog(totalBiomass,predBiomass, 'g.',[10,10000], [10,10000],'r-')
plt.xlabel('Observed (Mg/ha)', fontsize=18)
plt.ylabel('Predicted (Mg/ha)', fontsize=18)
plt.title('Total Biomass Estimate', fontsize=32)
plt.xlim([10,10000])
plt.ylim([10,10000])
plt.grid(which='minor', alpha=0.4)
plt.grid(which='major', alpha=0.8)
%%time
def _ratapplier_calc_biomass(info, inputs, outputs):
    """Calculate Biomass from RAT.

    Called by ratapplier below.
    """
ratArray = []
# Iterate for all the RAT columns
for name in trainingNames:
# Extract the array of values corresponding to the field site segments
ratArray.append(getattr(inputs.inrat, name).astype('float'))
# Convert the list of arrays to an array
ratArray = numpy.transpose(numpy.array(ratArray))
# Predict Biomass
biomass = biomassModel.predict(ratArray)
# Make the weird inputs nodata
biomass[ratArray.min(axis=1) < numpy.finfo(numpy.float32).eps] = 0
# Save to 'totalBiomass' column (will create if doesn't exist)
setattr(outputs.outrat,"totalBiomass", biomass)
# Set up ratapplier for input / output
inFile = ratapplier.RatAssociations()
outFile = ratapplier.RatAssociations()
# Pass in clumps file for the input and output as we'll be updating the existing RAT
inFile.inrat = ratapplier.RatHandle(keaFile)
outFile.outrat = ratapplier.RatHandle(keaFile)
# Apply function to all rows in chunks
ratapplier.apply(_ratapplier_calc_biomass, inFile, outFile)
# Additional Imports
import rsgislib
from rsgislib.rastergis import exportCol2GDALImage
# Setup the export parameters
outimage='/home/jovyan/work/Temp/totalBiomass.tif'
gdalformat = 'GTIFF'
datatype = rsgislib.TYPE_16UINT
field = 'totalBiomass'
# Run the export
exportCol2GDALImage(keaFile, outimage, gdalformat, datatype, field)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare the training data
Step2: Build the Machine Learning Model
Step4: Predict Biomass using the RAT
Step5: Export the Biomass band to an individual GeoTIFF
|
7,332
|
<ASSISTANT_TASK:>
Python Code:
data_dir = './data'
# FloydHub - Use with data ID "R5KrjnANiKVhLWAkpXhNBe"
#data_dir = '/input'
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import helper
helper.download_extract('mnist', data_dir)
helper.download_extract('celeba', data_dir)
show_n_images = 25
"""DON'T MODIFY ANYTHING IN THIS CELL"""
%matplotlib inline
import os
from glob import glob
from matplotlib import pyplot
mnist_images = helper.get_batch(glob(os.path.join(data_dir, 'mnist/*.jpg'))[:show_n_images], 28, 28, 'L')
pyplot.imshow(helper.images_square_grid(mnist_images, 'L'), cmap='gray')
show_n_images = 25
"""DON'T MODIFY ANYTHING IN THIS CELL"""
mnist_images = helper.get_batch(glob(os.path.join(data_dir, 'img_align_celeba/*.jpg'))[:show_n_images], 28, 28, 'RGB')
pyplot.imshow(helper.images_square_grid(mnist_images, 'RGB'))
"""DON'T MODIFY ANYTHING IN THIS CELL"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
import problem_unittests as tests
def model_inputs(image_width, image_height, image_channels, z_dim):
    """
    Create the model inputs
    :param image_width: The input image width
    :param image_height: The input image height
    :param image_channels: The number of image channels
    :param z_dim: The dimension of Z
    :return: Tuple of (tensor of real input images, tensor of z data, learning rate)
    """
# TODO: Implement Function
return None, None, None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_model_inputs(model_inputs)
def discriminator(images, reuse=False):
    """
    Create the discriminator network
    :param image: Tensor of input image(s)
    :param reuse: Boolean if the weights should be reused
    :return: Tuple of (tensor output of the discriminator, tensor logits of the discriminator)
    """
# TODO: Implement Function
return None, None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_discriminator(discriminator, tf)
def generator(z, out_channel_dim, is_train=True):
    """
    Create the generator network
    :param z: Input z
    :param out_channel_dim: The number of channels in the output image
    :param is_train: Boolean if generator is being used for training
    :return: The tensor output of the generator
    """
# TODO: Implement Function
return None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_generator(generator, tf)
def model_loss(input_real, input_z, out_channel_dim):
    """
    Get the loss for the discriminator and generator
    :param input_real: Images from the real dataset
    :param input_z: Z input
    :param out_channel_dim: The number of channels in the output image
    :return: A tuple of (discriminator loss, generator loss)
    """
# TODO: Implement Function
return None, None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_model_loss(model_loss)
def model_opt(d_loss, g_loss, learning_rate, beta1):
    """
    Get optimization operations
    :param d_loss: Discriminator loss Tensor
    :param g_loss: Generator loss Tensor
    :param learning_rate: Learning Rate Placeholder
    :param beta1: The exponential decay rate for the 1st moment in the optimizer
    :return: A tuple of (discriminator training operation, generator training operation)
    """
# TODO: Implement Function
return None, None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_model_opt(model_opt, tf)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import numpy as np
def show_generator_output(sess, n_images, input_z, out_channel_dim, image_mode):
    """
    Show example output for the generator
    :param sess: TensorFlow session
    :param n_images: Number of Images to display
    :param input_z: Input Z Tensor
    :param out_channel_dim: The number of channels in the output image
    :param image_mode: The mode to use for images ("RGB" or "L")
    """
cmap = None if image_mode == 'RGB' else 'gray'
z_dim = input_z.get_shape().as_list()[-1]
example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])
samples = sess.run(
generator(input_z, out_channel_dim, False),
feed_dict={input_z: example_z})
images_grid = helper.images_square_grid(samples, image_mode)
pyplot.imshow(images_grid, cmap=cmap)
pyplot.show()
def train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode):
    """
    Train the GAN
    :param epoch_count: Number of epochs
    :param batch_size: Batch Size
    :param z_dim: Z dimension
    :param learning_rate: Learning Rate
    :param beta1: The exponential decay rate for the 1st moment in the optimizer
    :param get_batches: Function to get batches
    :param data_shape: Shape of the data
    :param data_image_mode: The image mode to use for images ("RGB" or "L")
    """
# TODO: Build Model
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epoch_count):
for batch_images in get_batches(batch_size):
# TODO: Train Model
batch_size = None
z_dim = None
learning_rate = None
beta1 = None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
epochs = 2
mnist_dataset = helper.Dataset('mnist', glob(os.path.join(data_dir, 'mnist/*.jpg')))
with tf.Graph().as_default():
train(epochs, batch_size, z_dim, learning_rate, beta1, mnist_dataset.get_batches,
mnist_dataset.shape, mnist_dataset.image_mode)
batch_size = None
z_dim = None
learning_rate = None
beta1 = None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
epochs = 1
celeba_dataset = helper.Dataset('celeba', glob(os.path.join(data_dir, 'img_align_celeba/*.jpg')))
with tf.Graph().as_default():
train(epochs, batch_size, z_dim, learning_rate, beta1, celeba_dataset.get_batches,
celeba_dataset.shape, celeba_dataset.image_mode)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Face Generation
Step3: Explore the Data
Step5: CelebA
Step7: Preprocess the Data
Step10: Input
Step13: Discriminator
Step16: Generator
Step19: Loss
Step22: Optimization
Step25: Neural Network Training
Step27: Train
Step29: MNIST
Step31: CelebA
|
7,333
|
<ASSISTANT_TASK:>
Python Code::
import lightgbm as lgb
def custom_loss(y_pred, data):
y_true = data.get_label()
error = y_pred-y_true
#1st derivative of loss function
grad = 2 * error
#2nd derivative of loss function
hess = 0 * error + 2
return grad, hess
params = {"learning_rate" : 0.1}
training_data = lgb.Dataset(X_train , label = y_train)
model = lgb.train(train_set=training_data,
params=params,
fobj=custom_loss)
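# A companion evaluation metric (hedged sketch, not from the original snippet).
# LightGBM's feval expects a callable returning (name, value, is_higher_better);
# numpy is assumed to be available.
import numpy as np
def custom_eval(y_pred, data):
    y_true = data.get_label()
    return "custom_mse", np.mean((y_pred - y_true) ** 2), False
# It could be passed alongside the objective, e.g.:
# model = lgb.train(train_set=training_data, params=params,
#                   fobj=custom_loss, feval=custom_eval)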
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,334
|
<ASSISTANT_TASK:>
Python Code:
from Frame2D import Frame2D
from Frame2D.Members import Member
# because units are kips, inches
Member.E = 30000. #ksi
Member.G = 11500.
from IPython import display
display.Image('data/Beaufait-9-4-1.d/fig1.jpg')
frame = Frame2D('Beaufait-9-4-1') # Example 9.4.1, p. 460
frame.input_all()
rs = frame.solve()
frame.print_input()
frame.print_results(rs,mult=[1.,1.,1./12.])
import pandas as pd
efs = [('M1',11.77,2.72,33.06,-11.77,-2.72,7.75), # end forces from soln, Beaufait, p 473
('M2',9.40,8.85,-7.83,0.60,15.15,-74.11),
('M3',14.18,5.27,74.10,-14.18,-5.27,57.81)]
BOOK_MEFS = pd.DataFrame(efs,columns='ID,FXJ,FYJ,MZJ,FXK,FYK,MZK'.split(',')).set_index('ID')
BOOK_MEFS[['MZJ','MZK']] *= 12. # convert ft-kips to in-kips
BOOK_MEFS
HERE_MEFS = pd.DataFrame(frame.list_member_end_forces(rs),
columns='ID,FXJ,FYJ,MZJ,FXK,FYK,MZK'.split(',')).set_index('ID')
HERE_MEFS
pdiff = (100*(HERE_MEFS-BOOK_MEFS)/BOOK_MEFS)
pdiff.round(2)
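# Quick single-number check (hedged addition): the largest percent difference
# across all member end forces relative to the book's solution.
print("max |% difference| vs. book:", pdiff.abs().max().max())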
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Compare Solution Here with that in the Book
|
7,335
|
<ASSISTANT_TASK:>
Python Code:
7 // 3 # Floor division results in the quotient
7 % 3 # Modulus returns the remainder
5 == 5
5 == 4
True and True
True and False
True or True
True or False
False or False
True and (True or False)
x = 1
if( x > 0 ):
print( 'x is positive' )
if( 0 == x % 2 ):
print( 'x is even' )
else:
print( 'x is odd' )
y = 0
if( x < y ):
print( 'x is less than y' )
elif( x > y ):
print( 'x is greater than y' )
else:
print( 'x and y are equal' )
# TODO - calculate a tax rate, tip or GPA example
if( x == y ):
print( 'x and y are equal' )
else:
if( x < y ):
print( 'x is less than y' )
else:
print( 'x is greater than y' )
# Using nested conditionals
if( 0 < x ):
if( x < 10 ):
print( 'x is a positive single-digit number' )
# Using logical operators - this is easier to read
if( (0 < x) and (x < 10) ):
print( 'x is a positive single-digit number' )
def countdown( n ):
if( n <= 0 ):
print( 'Blastoff!' )
else:
print( n )
# Recursive call
countdown( n - 1 )
countdown( 5 )
def countdown_unsafe( n ):
if( n == 0 ):
print( 'Blastoff!' )
else:
print( n )
# Recursive call
        countdown_unsafe( n - 1 )
# Uncomment this to see what will happen
# countdown_unsafe( 3.5 )
def calculate_gpa( a_hours, b_hours, c_hours, d_hours, f_hours ):
# INSERT YOUR CODE HERE
return 0
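# One possible reference solution (hedged; assumes the standard 4.0 scale where
# A=4, B=3, C=2, D=1, F=0 and GPA is the credit-hour-weighted average):
def calculate_gpa_example( a_hours, b_hours, c_hours, d_hours, f_hours ):
    total_hours = a_hours + b_hours + c_hours + d_hours + f_hours
    if( total_hours == 0 ):
        return 0
    points = 4*a_hours + 3*b_hours + 2*c_hours + 1*d_hours
    return points / total_hours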
def calculate_fibonacci( n ):
# INSERT YOUR CODE HERE
return 0
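# One possible reference solution (hedged; the naive recursive definition with
# fib(0)=0 and fib(1)=1 -- fine for small n, exponential in time for large n):
def calculate_fibonacci_example( n ):
    if( n <= 0 ):
        return 0
    elif( n == 1 ):
        return 1
    else:
        return calculate_fibonacci_example( n - 1 ) + calculate_fibonacci_example( n - 2 )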
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Boolean expressions
Step2: True and False are not strings, nor are the equivalent to strings
Step3: When combining multiple expresssions, don't be afraid to use parenthesis
Step4: All operands in a boolean expression should be boolean expressions or boolean variables/values
Step5: The boolean expression after the if is called the condition
Step6: The possible paths the code can follow are referred to as branches
Step7: elif is an abbreviation for "else if"
Step8: Nested conditionals
Step9: The outer conditional contains two (2) branches
Step10: Recursion
Step11: This problem-solving approach is called divide and conquer
Step12: It has been modified so that the conditional is an equality test
Step13: Using <= acts as a failsafe if an incorrect value is passed to the function
Step14: Write a function called calculate_fibonacci that takes a parameter n, denoting the $n$-th fibonacci number, and returns the fibonacci number.
|
7,336
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
%matplotlib notebook
import sqlite3
conn = sqlite3.connect("intro.db")
cur = conn.cursor()
cur.execute( # complete
cur.execute("""create table DSFPstudents(
                   Name text,
                   Institution text,
                   Year tinyint
               )""")
cur.execute( # complete
cur.execute("""insert into DSFPstudents(Name, Institution, Year)
               values ('Adam Miller', 'Northwestern', 13)""")
cur.execute("""insert into DSFPstudents(Name, Institution, Year)
               values ('Lucianne Walkowicz', 'Adler', 14)""")
cur.execute( # complete
cur.fetchall()
cur.execute("select Institution from DSFPstudents where Year > 2")
cur.fetchall()
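# Hedged aside (not in the original notebook): when values come from untrusted
# input, sqlite3's parameterized queries are safer than building SQL strings.
# The inserted row below is purely illustrative.
cur.execute("insert into DSFPstudents(Name, Institution, Year) values (?, ?, ?)",
            ("Example Student", "Example University", 1))
conn.commit()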
# you may need to run conda install -c astropy astroquery
from astroquery.sdss import SDSS
SDSS.query_sql( # complete
SDSS.query_sql("select top 20 * from PhotoObjAll")
SDSS.query_sql( # complete
SDSS.query_sql("""select top 20 objid, cModelMag_u, cModelMag_g, cModelMag_r, cModelMag_i, cModelMag_z,
                         class
                  from photoobjall p
                  inner join specobjall s on p.objid = s.bestobjid""")
SDSS.query_sql("""select top 20 objid, cModelMag_u, cModelMag_g, cModelMag_r, cModelMag_i, cModelMag_z,
                         class
                  from photoobjall p
                  left outer join specobjall s on p.objid = s.bestobjid""")
SDSS.query_sql("""select top 20 objid, cModelMag_u, cModelMag_g, cModelMag_r, cModelMag_i, cModelMag_z,
                         class
                  from photoobjall p
                  right outer join specobjall s on s.bestobjid = p.objid""")
SDSS.query_sql("""select rm.*
                  from
                      (select r.objid, r.sourcename, r.ra, r.dec, r.cps, r.hr1, r.hr2,
                              cModelMag_u, cModelMag_g, cModelMag_r, cModelMag_i, cModelMag_z
                       from photoobjall p join rosat r on p.objid = r.objid
                       where (cModelFlux_u + cModelFlux_g + cModelFlux_r + cModelFlux_i + cModelFlux_z > 10000)
                             and p.type = 3) as rm
                  left join specobjall p on rm.objid = p.bestobjid
                  where p.bestobjid is null""")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: At the most basic level - databases store your bytes, and later return those bytes (or a subset of them) when queried.
Step2: Without diving too much into the weeds (we'll investigate this further later this week), we need to establish a connection to the database. From the connection we create a cursor, which allows us to actually interact with the database.
Step4: And just like that - we have now created a new database intro.db, with which we can "store bytes" or later "retrieve bytes" once we have added some data to the database.
Step8: Once a table is created, we can use the database to store bytes. If I were to populate my PetInfo table I would do the following
Step12: Now that we have bytes in the database, we can retrieve those bytes with one (or several) queries. There are 3 basic building blocks to a query
Step13: In closing this brief introduction to databases, note that good databases follow the 4 ACID properties
Step15: astroquery enables seemless connections to the SDSS database via the Python shell.
Step17: That's more columns than we will likely ever need. Instead, let's focus on objID, a unique identifier, cModelMag_u, cModelMag_g, cModelMag_r, cModelMag_i, and cModelMag_z, the source magnitude in $u', g', r', i', z'$, respectively.
Step19: Problem 2c
Step21: Problem 2d
Step23: Challenge Problem
|
7,337
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
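# Small helper (a sketch, not in the original): undo the scaling later, mirroring
# how predictions are converted back further below with mean and std from scaled_features.
def unscale(values, feature_name):
    mean, std = scaled_features[feature_name]
    return values * std + mean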
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### TODO: Set self.activation_function to your implemented sigmoid function ####
#
# Note: in Python, you can define a function with a lambda expression,
# as shown below.
self.activation_function = lambda x : 1 / (1 + np.exp(-x)) # Replace 0 with your sigmoid calculation.
### If the lambda code above is not something you're familiar with,
# You can uncomment out the following three lines and put your
# implementation there instead.
#
#def sigmoid(x):
# return 0 # Replace 0 with your sigmoid calculation here
#self.activation_function = sigmoid
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer - Replace these values with your calculations.
hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with your calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error - Replace this value with your calculations.
error = y - final_outputs # Output layer error is the difference between desired target and actual output.
# TODO: Calculate the hidden layer's contribution to the error
hidden_error = error * self.weights_hidden_to_output.T * (hidden_outputs * (1 - hidden_outputs))
# Weight step (input to hidden)
delta_weights_i_h += hidden_error * X[:, None]
# Weight step (hidden to output)
delta_weights_h_o += error * hidden_outputs[:, None]
# TODO: Update the weights - Replace these values with your calculations.
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# TODO: Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with the appropriate calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
import sys
### Set the hyperparameters here ###
### Commented code used to identify hyper params - iterations and learning rate. Psuedo GridSearchCV type.
#iterations_list = [600,650,700]
#learning_rate_list = [1.6, 1.7, 1.8, 1.85, 1.9]
iterations = 1800
learning_rate = 1.7
hidden_nodes = 8
output_nodes = 1
#min_iter = 1000
#min_lr = 5
#min_val_loss = 1.0
#for iterations in iterations_list:
# for learning_rate in learning_rate_list:
#### Original Code Start
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
#### Original Code End
# if val_loss < min_val_loss:
# min_lr = learning_rate
# min_iter = iterations
# min_val_loss = val_loss
#print ("Learning Rate =", min_lr)
#print ("Iterations =", min_iter)
#print ("Validation Loss =", min_val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Unit tests
Step9: Training the network
Step10: Check out your predictions
|
7,338
|
<ASSISTANT_TASK:>
Python Code:
from networkit import *
%matplotlib inline
cd ~/workspace/NetworKit/
G = readGraph("input/PGPgiantcompo.graph", Format.METIS)
n = G.numberOfNodes()
m = G.numberOfEdges()
print(n, m)
G.toString()
V = G.nodes()
print(V[:10])
E = G.edges()
print(E[:10])
edgeExists = G.hasEdge(42,11)
if edgeExists:
print("Weight of existing edge:", G.weight(42,11))
print("Weight of nonexisting edge:", G.weight(42,12))
count = 0 # counts number of nodes with more than 100 neighbors
for v in G.nodes():
if G.degree(v) > 100:
count = count + 1
print("Number of nodes with more than 100 neighbors: ", count)
# Enter code for Q&A Session #1 here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In case a Python warning appears that recommends an update to Python 3.4, simply ignore it for this tutorial. Python 3.3 works just as fine for our purposes.
Step2: Reading Graphs
Step3: In the course of this tutorial, we are going to work (among others) on the PGPgiantcompo network, a social network/web of trust in which nodes are PGP keys and an edge represents a signature from one key on another (web of trust). It is distributed with NetworKit as a good starting point.
Step4: NetworKit stores nodes simply as integer indices. Edges are pairs of such indices. The following prints the indices of the first ten nodes and edges, respectively.
Step5: Another very useful feature is to determine if an edge is present and what its weight is. In case of unweighted graphs, edges have the default weight 1.
Step6: Many graph algorithms can be expressed with iterators over nodes or edges. As an example, let us iterate over the nodes to determine how many of them have more than 100 neighbors.
Step7: Interesting Features of a Network
|
7,339
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
values = np.random.uniform(-10.0, 10.0, 100000)
plt.hist(values, 50)
plt.show()
from scipy.stats import norm
import matplotlib.pyplot as plt
x = np.arange(-3, 3, 0.001)
plt.plot(x, norm.pdf(x))
import numpy as np
import matplotlib.pyplot as plt
mu = 5.0
sigma = 2.0
values = np.random.normal(mu, sigma, 10000)
plt.hist(values, 50)
plt.show()
from scipy.stats import expon
import matplotlib.pyplot as plt
x = np.arange(0, 10, 0.001)
plt.plot(x, expon.pdf(x))
from scipy.stats import binom
import matplotlib.pyplot as plt
n, p = 10, 0.5
x = np.arange(0, 11)  # the pmf is defined on integer counts
plt.plot(x, binom.pmf(x, n, p))
from scipy.stats import poisson
import matplotlib.pyplot as plt
mu = 500
x = np.arange(400, 600)  # the pmf is defined on integer counts
plt.plot(x, poisson.pmf(x, mu))
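# For comparison (hedged addition): draw samples from the same Poisson
# distribution with numpy's generator and histogram them.
values = np.random.poisson(mu, 10000)
plt.hist(values, 50)
plt.show()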
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Normal / Gaussian
Step2: Generate some random numbers with a normal distribution. "mu" is the desired mean, "sigma" is the standard deviation
Step3: Exponential PDF / "Power Law"
Step4: Binomial Probability Mass Function
Step5: Poisson Probability Mass Function
|
7,340
|
<ASSISTANT_TASK:>
Python Code:
import parselmouth
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set() # Use seaborn's default style to make attractive graphs
plt.rcParams['figure.dpi'] = 100 # Show nicely large images in this notebook
snd = parselmouth.Sound("audio/the_north_wind_and_the_sun.wav")
plt.figure()
plt.plot(snd.xs(), snd.values.T)
plt.xlim([snd.xmin, snd.xmax])
plt.xlabel("time [s]")
plt.ylabel("amplitude")
plt.show() # or plt.savefig("sound.png"), or plt.savefig("sound.pdf")
snd_part = snd.extract_part(from_time=0.9, preserve_times=True)
plt.figure()
plt.plot(snd_part.xs(), snd_part.values.T, linewidth=0.5)
plt.xlim([snd_part.xmin, snd_part.xmax])
plt.xlabel("time [s]")
plt.ylabel("amplitude")
plt.show()
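# Small aside (a sketch, not in the original): the extracted fragment can be
# written back to disk. To my knowledge parselmouth Sound objects expose Praat's
# save functionality; the file name here is illustrative.
snd_part.save("the_sun_fragment.wav", "WAV")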
def draw_spectrogram(spectrogram, dynamic_range=70):
X, Y = spectrogram.x_grid(), spectrogram.y_grid()
sg_db = 10 * np.log10(spectrogram.values)
plt.pcolormesh(X, Y, sg_db, vmin=sg_db.max() - dynamic_range, cmap='afmhot')
plt.ylim([spectrogram.ymin, spectrogram.ymax])
plt.xlabel("time [s]")
plt.ylabel("frequency [Hz]")
def draw_intensity(intensity):
plt.plot(intensity.xs(), intensity.values.T, linewidth=3, color='w')
plt.plot(intensity.xs(), intensity.values.T, linewidth=1)
plt.grid(False)
plt.ylim(0)
plt.ylabel("intensity [dB]")
intensity = snd.to_intensity()
spectrogram = snd.to_spectrogram()
plt.figure()
draw_spectrogram(spectrogram)
plt.twinx()
draw_intensity(intensity)
plt.xlim([snd.xmin, snd.xmax])
plt.show()
def draw_pitch(pitch):
# Extract selected pitch contour, and
# replace unvoiced samples by NaN to not plot
pitch_values = pitch.selected_array['frequency']
pitch_values[pitch_values==0] = np.nan
plt.plot(pitch.xs(), pitch_values, 'o', markersize=5, color='w')
plt.plot(pitch.xs(), pitch_values, 'o', markersize=2)
plt.grid(False)
plt.ylim(0, pitch.ceiling)
plt.ylabel("fundamental frequency [Hz]")
pitch = snd.to_pitch()
# If desired, pre-emphasize the sound fragment before calculating the spectrogram
pre_emphasized_snd = snd.copy()
pre_emphasized_snd.pre_emphasize()
spectrogram = pre_emphasized_snd.to_spectrogram(window_length=0.03, maximum_frequency=8000)
plt.figure()
draw_spectrogram(spectrogram)
plt.twinx()
draw_pitch(pitch)
plt.xlim([snd.xmin, snd.xmax])
plt.show()
import pandas as pd
def facet_util(data, **kwargs):
digit, speaker_id = data[['digit', 'speaker_id']].iloc[0]
sound = parselmouth.Sound("audio/{}_{}.wav".format(digit, speaker_id))
draw_spectrogram(sound.to_spectrogram())
plt.twinx()
draw_pitch(sound.to_pitch())
# If not the rightmost column, then clear the right side axis
if digit != 5:
plt.ylabel("")
plt.yticks([])
results = pd.read_csv("other/digit_list.csv")
grid = sns.FacetGrid(results, row='speaker_id', col='digit')
grid.map_dataframe(facet_util)
grid.set_titles(col_template="{col_name}", row_template="{row_name}")
grid.set_axis_labels("time [s]", "frequency [Hz]")
grid.set(facecolor='white', xlim=(0, None))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Once we have the necessary libraries for this example, we open and read in the audio file and plot the raw waveform.
Step2: snd is now a Parselmouth Sound object, and we can access its values and other properties to plot them with the common matplotlib Python library
Step3: It is also possible to extract part of the speech fragment and plot it separately. For example, let's extract the word "sun" and plot its waveform with a finer line.
Step4: Next, we can write a couple of ordinary Python functions to plot a Parselmouth Spectrogram and Intensity.
Step5: After defining how to plot these, we use Praat (through Parselmouth) to calculate the spectrogram and intensity to actually plot the intensity curve overlaid on the spectrogram.
Step6: The Parselmouth functions and methods have the same arguments as the Praat commands, so we can for example also change the window size of the spectrogram analysis to get a narrow-band spectrogram. Next to that, let's now have Praat calculate the pitch of the fragment, so we can plot it instead of the intensity.
Step7: Using the FacetGrid functionality from seaborn, we can even plot plot multiple a structured grid of multiple custom spectrograms. For example, we will read a CSV file (using the pandas library) that contains the digit that was spoken, the ID of the speaker and the file name of the audio fragment
|
7,341
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# Save the last 21 days
test_data = data[-21*24:]
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.input_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5,
(self.output_nodes, self.hidden_nodes))
self.lr = learning_rate
#### Set this to your implemented sigmoid function ####
# Activation function is the sigmoid function
self.activation_function = lambda x: 1 / (1 + np.exp(-x))
def train(self, inputs_list, targets_list):
# Convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output,hidden_outputs)
final_outputs = final_inputs
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
#output_errors = # Output layer error is the difference between desired target and actual output.
        output_errors = targets - final_outputs
# TODO: Backpropagated error
hidden_errors = np.dot( self.weights_hidden_to_output.T, output_errors.T)
hidden_grad = hidden_errors * (hidden_outputs * (1 - hidden_outputs))
# TODO: Update the weights
self.weights_hidden_to_output += self.lr*(np.dot(output_errors.T,hidden_outputs.T))
self.weights_input_to_hidden += self.lr*(np.dot(hidden_grad,inputs.T))
def run(self, inputs_list):
# Run a forward pass through the network
inputs = np.array(inputs_list, ndmin=2).T
#### Implement the forward pass here ####
# TODO: Hidden layer
hidden_inputs = np.dot( self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)
final_outputs = final_inputs
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
import sys
### Set the hyperparameters here ###
epochs = 2000
learning_rate = 0.009
hidden_nodes = 13
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for e in range(epochs):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
for record, target in zip(train_features.ix[batch].values,
train_targets.ix[batch]['cnt']):
network.train(record, target)
# Printing out the training progress
train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
plt.ylim(ymax=0.5)
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
import unittest
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
test_w_i_h = np.array([[0.1, 0.4, -0.3],
[-0.2, 0.5, 0.2]])
test_w_h_o = np.array([[0.3, -0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328, -0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, 0.39775194, -0.29887597],
[-0.20185996, 0.50074398, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Training the network
Step9: Check out your predictions
Step10: Thinking about your results
|
7,342
|
<ASSISTANT_TASK:>
Python Code:
from pandas import Series, read_csv
from igraph import *
from numba import jit
import numpy as np
import os
import time
# Gather all the files.
files = os.listdir('timeseries/')
# Concatenate (or stack) all the files.
# Approx 12.454981 seconds
i = 0
for f in files:
if i == 0:
ts_matrix = np.loadtxt('timeseries/' + f).T
i += 1
else:
new_ts = np.loadtxt('timeseries/' + f).T
ts_matrix = np.hstack((ts_matrix, new_ts))
# Compute the correlation matrix
corr_mat = np.corrcoef(ts_matrix.T)
# Save in .npz file
# np.savez_compressed('corr_mat.npz', corr_mat=corr_mat)
# X = np.load('corr_mat.npz')
# X = X['corr_mat']
# a flatten function optimized by numba
@jit
def fast_flatten(X):
k = 0
length = X.shape[0] * X.shape[1]
X_flat = np.empty(length)
for i in xrange(X.shape[0]):
for j in xrange(X.shape[1]):
X_flat[k] = X[i, j]
k += 1
return X_flat
# helper function that returns the min of the number of
# unique values depending on the threshold
def min_thresh_val(X, threshold):
X_flat = fast_flatten(X)
index = int(len(X_flat) * threshold)
    return np.unique(np.sort(X_flat))[::-1][:index].min()
# Computes the threshold matrix without killing the python kernel
@jit
def thresh_mat(X, threshold):
min_val = min_thresh_val(X, threshold)
print("Done with min_thresh_val")
# M = zeros((X.shape[0], X.shape[1]))
for i in xrange(X.shape[0]):
for j in xrange(X.shape[1]):
# if X[i, j] >= min_val:
# M[i, j] = X[i, j]
if X[i, j] < min_val:
X[i, j] = 0
thresh_mat(X, .01)
print("Finished Threshold Matrix")
# savez_compressed('threshold_mat.npz', threshold_mat=X)
# from: http://stackoverflow.com/questions/29655111/igraph-graph-from-numpy-or-pandas-adjacency-matrix
# get the row, col indices of the non-zero elements in your adjacency matrix
conn_indices = np.where(X)
# get the weights corresponding to these indices
weights = X[conn_indices]
# a sequence of (i, j) tuples, each corresponding to an edge from i -> j
edges = zip(*conn_indices)
# initialize the graph from the edge sequence
G = Graph(edges=edges, directed=False)
# assign node names and weights to be attributes of the vertices and edges
# respectively
G.vs['label'] = np.arange(X.shape[0])
G.es['weight'] = weights
# get the vertex clustering corresponding to the best modularity
cm = G.community_multilevel()
# save the cluster membership of each node in a csv file
Series(cm.membership).to_csv('mem.csv', index=False)
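# Quick summary of the clustering (hedged addition): igraph's VertexClustering
# reports the number of communities and the modularity of the partition.
print("communities:", len(cm))
print("modularity:", cm.modularity)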
def index_list(num, ind_list, ts_matrix):
i = 0
for z in zip(ind_list, ts_matrix):
if z[0] == num and i == 0:
output = np.array([z[1]])
i += 1
elif z[0] == num and i != 0:
output = np.append(output, [z[1]], axis=0)
return output
louvain_ind = read_csv('mem.csv').values.T
for f in files:
ts_matrix = np.loadtxt('timeseries/' + f).T
for i in range(1, 65):
subject = louvain_ind[:722 * i][0]
for j in range(4):
i_list = index_list(j, subject, ts_matrix)
avg = np.average(i_list, axis=1)
Series(avg).to_csv("module_matrices/subject" + str(i)
+ "mod" + str(j), index=False, sep="\t")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Phase 1
Step3: Step 2
Step4: Step 3
Step5: Step 4
Step6: Phase 2
|
7,343
|
<ASSISTANT_TASK:>
Python Code:
# You can store integers
x = 10
# You can store strings
y = "Hi, my name is Paul"
# A variable can be as long as you like. It is best to use variable names
# that express what the variable is.
long_variable_names_work_too = 1.3
hi = 'hello'
print("It will change")
# Here are some integers:
2
5
5000
# Here is some regular division:
5/2
# Here is some integer division:
5//2
# Here are some floating point numbers:
1.4
200.12
.008
# Here is some floating point number division:
1000.15/13
"This is a string."
'This is also a string.'
my_string = "This is my string."
print(my_string[0])
print(my_string[11:15])
print(my_string[-4:])
print(1<5)
print(2>5)
print(4==4)
# This is an empty list:
[]
# This is a list with some information.
[1, 2, 3, 4, 5, 6]
numbers = [1,2,3,4,5,6]
print(numbers[0])
# This is an empty list:
{}
# This is a list with some information:
{"Independence Day":"July 4th", "Halloween":"October 31st", "Labor Day 2016":"September 6th"}
holiday_dates = {"Independence Day":"July 4th", "Halloween":"October 31st", "Labor Day 2016":"September 6th"}
print(holiday_dates["Halloween"])
i = 0
while i < 4:
print(i)
# Increase i by one. This can also be written i += 1
i = i + 1
animals = ["tiger", "lion", "monkey", "pig"]
for animal in animals:
print(animal)
import math, os, re
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Printing
Step2: A Few Data Types
Step3: Floats
Step4: Note the trailing digits. Floating point numbers are not perfectly precise, so be careful
Step5: Getting a substring
Step6: Boolean Values
Step7: Lists
Step8: Retrieving Information From Lists
Step9: Dictionaries
Step10: Retrieving information from a dictionary
Step11: Python Structure
Step12: For loops
Step13: Libraries
|
7,344
|
<ASSISTANT_TASK:>
Python Code:
# Author: Annalisa Pascarella <a.pascarella@iac.cnr.it>
#
# License: BSD (3-clause)
import os.path as op
import matplotlib.pyplot as plt
from nilearn import plotting
import mne
from mne.minimum_norm import make_inverse_operator, apply_inverse
# Set dir
data_path = mne.datasets.sample.data_path()
subject = 'sample'
data_dir = op.join(data_path, 'MEG', subject)
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
# Set file names
fname_mixed_src = op.join(bem_dir, '%s-oct-6-mixed-src.fif' % subject)
fname_aseg = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
fname_model = op.join(bem_dir, '%s-5120-bem.fif' % subject)
fname_bem = op.join(bem_dir, '%s-5120-bem-sol.fif' % subject)
fname_evoked = data_dir + '/sample_audvis-ave.fif'
fname_trans = data_dir + '/sample_audvis_raw-trans.fif'
fname_fwd = data_dir + '/sample_audvis-meg-oct-6-mixed-fwd.fif'
fname_cov = data_dir + '/sample_audvis-shrunk-cov.fif'
labels_vol = ['Left-Amygdala',
'Left-Thalamus-Proper',
'Left-Cerebellum-Cortex',
'Brain-Stem',
'Right-Amygdala',
'Right-Thalamus-Proper',
'Right-Cerebellum-Cortex']
src = mne.setup_source_space(subject, spacing='oct5',
add_dist=False, subjects_dir=subjects_dir)
vol_src = mne.setup_volume_source_space(
subject, mri=fname_aseg, pos=10.0, bem=fname_model,
volume_label=labels_vol, subjects_dir=subjects_dir,
add_interpolator=False, # just for speed, usually this should be True
verbose=True)
# Generate the mixed source space
src += vol_src
print(f"The source space contains {len(src)} spaces and "
f"{sum(s['nuse'] for s in src)} vertices")
src.plot(subjects_dir=subjects_dir)
nii_fname = op.join(bem_dir, '%s-mixed-src.nii' % subject)
src.export_volume(nii_fname, mri_resolution=True, overwrite=True)
plotting.plot_img(nii_fname, cmap='nipy_spectral')
fwd = mne.make_forward_solution(
fname_evoked, fname_trans, src, fname_bem,
    mindist=5.0,  # ignore sources <= 5 mm from the inner skull
meg=True, eeg=False, n_jobs=1)
del src # save memory
leadfield = fwd['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
print(f"The fwd source space contains {len(fwd['src'])} spaces and "
f"{sum(s['nuse'] for s in fwd['src'])} vertices")
# Load data
condition = 'Left Auditory'
evoked = mne.read_evokeds(fname_evoked, condition=condition,
baseline=(None, 0))
noise_cov = mne.read_cov(fname_cov)
snr = 3.0 # use smaller SNR for raw data
inv_method = 'dSPM' # sLORETA, MNE, dSPM
parc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s'
loose = dict(surface=0.2, volume=1.)
lambda2 = 1.0 / snr ** 2
inverse_operator = make_inverse_operator(
evoked.info, fwd, noise_cov, depth=None, loose=loose, verbose=True)
del fwd
stc = apply_inverse(evoked, inverse_operator, lambda2, inv_method,
pick_ori=None)
src = inverse_operator['src']
initial_time = 0.1
stc_vec = apply_inverse(evoked, inverse_operator, lambda2, inv_method,
pick_ori='vector')
brain = stc_vec.plot(
hemi='both', src=inverse_operator['src'], views='coronal',
initial_time=initial_time, subjects_dir=subjects_dir)
brain = stc.surface().plot(initial_time=initial_time,
subjects_dir=subjects_dir)
fig = stc.volume().plot(initial_time=initial_time, src=src,
subjects_dir=subjects_dir)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels_parc = mne.read_labels_from_annot(
subject, parc=parc, subjects_dir=subjects_dir)
label_ts = mne.extract_label_time_course(
[stc], labels_parc, src, mode='mean', allow_empty=True)
# plot the times series of 2 labels
fig, axes = plt.subplots(1)
axes.plot(1e3 * stc.times, label_ts[0][0, :], 'k', label='bankssts-lh')
axes.plot(1e3 * stc.times, label_ts[0][-1, :].T, 'r', label='Brain-stem')
axes.set(xlabel='Time (ms)', ylabel='MNE current (nAm)')
axes.legend()
mne.viz.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up our source space
Step2: Get a surface-based source space, here with few source points for speed
Step3: Now we create a mixed src space by adding the volume regions specified in the
Step4: View the source space
Step5: We could write the mixed source space with
Step6: Compute the fwd matrix
Step7: Compute inverse solution
Step8: Plot the mixed source estimate
Step9: Plot the surface
Step10: Plot the volume
Step11: Process labels
|
7,345
|
<ASSISTANT_TASK:>
Python Code:
def numberOfArithmeticSequences(L, N):
    # Count contiguous arithmetic slices of length >= 3
    if N <= 2:
        return 0
    count = 0  # arithmetic slices ending at the current index
    res = 0
    for i in range(2, N):
        if (L[i] - L[i - 1]) == (L[i - 1] - L[i - 2]):
            count += 1
        else:
            count = 0
        res += count
    return res

L = [1, 3, 5, 6, 7, 8]
N = len(L)
print(numberOfArithmeticSequences(L, N))  # 4: (1,3,5), (5,6,7), (6,7,8), (5,6,7,8)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,346
|
<ASSISTANT_TASK:>
Python Code:
%%file vofz.scons
Flow('vel',None,'spike n1=501 nsp=4 mag=0.5 k1=101,201,301,401 | causint | add add=2')
Result('vel',
'''
graph min2=0 max2=5 label2=Velocity unit2=km/s plotfat=3
transp=y yreverse=y wanttitle=n wherexlabel=t
''')
from m8r import view
view('vel')
%%file cmp.scons
# Define reflectivity as a function of ray parameter
Flow('refl','vel',
'''
ai2refl | ricker1 frequency=10 |
spray axis=2 n=251 d=0.01 o=0
''')
# Model CMP gather
Flow('cmp','refl vel',
'''
itxmo velocity=${SOURCES[1]} nx=150 dx=0.02 x0=0.02 inv=y |
window f2=2 max2=3 | put label2=Offset unit2=km |
mutter v0=1.8 half=n |
noise seed=2016 var=1e-6
''')
Result('cmp','grey title="CMP Gather" ')
view('cmp')
%%file velan.scons
# Velocity scan
Flow('vscan','cmp','vscan semblance=y half=n v0=1.5 nv=101 dv=0.02')
Plot('vscan','grey allpos=y color=j title="Semblance Scan" ')
# Automatic picking
Flow('vpick','vscan','scale axis=2 | pick rect1=40 vel0=2')
Plot('vpick',
'''
graph pad=n transp=y yreverse=y dash=1 plotcol=0 plotfat=3 wantaxis=n wanttitle=n min2=1.5 max2=3.5
''')
# Overlay pick on velocity scan
Result('vscan','vscan vpick','Overlay')
view('vscan')
%%file nmo.scons
# Apply NMO
Flow('nmo','cmp vpick','nmo half=n velocity=${SOURCES[1]}')
Result('nmo','grey title="Normal Moveout" ')
view('nmo')
%%file vrms.scons
Flow('vrms','vel','mul $SOURCE | causint | math output="sqrt(input*0.004/(x1+0.004))" ')
Result('vrms','vel vrms vpick',
'''
cat axis=2 ${SOURCES[1:3]} |
graph dash=0,0,1 title="RMS Velocity" label2=Velocity unit2=km/s
transp=y yreverse=y wherexlabel=t wheretitle=b min2=0 max2=5
''')
view('vrms')
%%file dix.scons
# Find position of reflectors
Flow('refs','nmo','stack | envelope | max1 | window n1=4 | real | pad beg1=1')
# Find RMS velocities for layers
Flow('vn','vrms refs','inttest1 coord=${SOURCES[1]} interp=spline nw=4')
Plot('picks','refs vn',
'''
cmplx ${SOURCES[1]} |
graph symbol=o symbolsz=8 wanttitle=n wantaxis=n plotcol=3
transp=y yreverse=y min1=0 max1=2 min2=0 max2=5 pad=n
''')
# Find layer time thicknesses
Flow('dt','refs','igrad')
# Dix inversion
Flow('vdixn','vn refs dt',
'mul ${SOURCES[:2]} | igrad | div ${SOURCES[2]} | math output="sqrt(input)" ')
Flow('vdix','vdixn refs',
'''
pad beg1=1 | igrad | window n1=4 |
bin1 head=${SOURCES[1]} x0=0 dx=0.004 nx=501 | causint
''')
Plot('vdix','vel vrms vdix',
'''
cat axis=2 ${SOURCES[1:3]} |
graph dash=0,0,1 title="Dix Inversion" label2=Velocity unit2=km/s
transp=y yreverse=y wherexlabel=t wheretitle=b min2=0 max2=5 pad=n
''')
Result('vdix','vdix picks','Overlay')
from m8r import view
view('vdix')
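# For intuition, Dix's equation recovers interval velocities from RMS
# velocities: v_int^2 = (v2^2*t2 - v1^2*t1) / (t2 - t1). A small numeric
# sketch with made-up values (the SCons flow above does essentially this
# for each layer):
import numpy as np
t1, t2 = 0.4, 0.8        # two-way times to consecutive reflectors (s)
v1, v2 = 2.0, 2.3        # RMS velocities at those times (km/s)
v_int = np.sqrt((v2**2 * t2 - v1**2 * t1) / (t2 - t1))
print(v_int)             # interval velocity of the layer in between (~2.57)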
%%file pegleg.scons
# Download pre-processed CMP gathers
# from the Viking Graben dataset
Fetch('paracdp.segy','viking')
# Convert to RSF
Flow('paracdp tparacdp','paracdp.segy',
'segyread tfile=${TARGETS[1]}')
# Convert to CDP gathers, time-power gain and high-pass filter
Flow('cmps','paracdp',
'''
intbin xk=cdpt yk=cdp | window max1=4 |
pow pow1=2 | bandpass flo=5
''')
# Extract offsets
Flow('offsets','tparacdp',
'''
headermath output=offset |
intbin head=$SOURCE xk=cdpt yk=cdp |
dd type=float |
scale dscale=0.001
''')
# Extract one CMP gather
########################
Flow('cmp1','cmps','window n3=1 f3=1000')
Flow('offset1','offsets','window n3=1 f3=1000 squeeze=n')
# Velocity scan
Flow('vscan1','cmp1 offset1',
'''
vscan semblance=y half=n v0=1.4 nv=121 dv=0.02
offset=${SOURCES[1]}
''')
Plot('vscan1',
'grey color=j allpos=y title="Semblance Scan" unit2=km/s')
# Automatic pick
Flow('vpick1','vscan1','mutter inner=y v0=0.4 t0=0.7 x0=1.4 half=n | pick rect1=25 vel0=1.45')
Plot('vpick1',
'''
graph yreverse=y transp=y plotcol=7 plotfat=7
pad=n min2=1.4 max2=3.8 wantaxis=n wanttitle=n
''')
# Predict multiple velocity
vw=1.45 # water velocity
tw=0.48 # water depth
nw=121 # water depth in time samples
dt=0.004
Flow('water','vpick1','spike k1=%d | causint' % nw)
Flow('vmult1','vpick1 water',
'''
pad beg1=%d | window n1=1001 | put o1=0 |
math output="input*input" |
math m=${SOURCES[1]} output="(1-m)*%g+m*sqrt((input*(x1-%g)+%g)/(x1+%g))"
''' % (nw,vw,tw,vw*vw*tw,dt))
Plot('vmult1',
'''
graph yreverse=y transp=y plotcol=7 plotfat=7 dash=1
pad=n min2=1.4 max2=3.8 wantaxis=n wanttitle=n
''')
Result('vscan2','vscan1 vpick1 vmult1','Overlay')
from m8r import view
view('vscan2')
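# For intuition: a first-order water-bottom multiple spends an extra two-way
# time tw in the water layer, so its RMS velocity stacks the water leg onto
# the primary: v_mult(t) ~ sqrt((v_prim^2 * (t - tw) + vw^2 * tw) / t).
# A rough numeric check (my reading of the math string above):
import numpy as np
vw, tw = 1.45, 0.48           # water velocity (km/s) and two-way water time (s)
t = 2.0                       # multiple arrival time (s)
v_prim = 2.4                  # primary RMS velocity at t - tw (km/s)
v_mult = np.sqrt((v_prim**2 * (t - tw) + vw**2 * tw) / t)
print(v_mult)                 # lower than v_prim, as expected for a pegleg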
%%file nmomult.scons
Flow('nmo1','cmp1 offset1 vpick1','nmo half=n offset=${SOURCES[1]} velocity=${SOURCES[2]}')
Flow('nmo2','cmp1 offset1 vmult1','nmo half=n offset=${SOURCES[1]} velocity=${SOURCES[2]}')
Plot('nmo1','window min1=2.5 | grey title="NMO with a higher velocity" ')
Plot('nmo2','window min1=2.5 | grey title="NMO with a lower velocity" ')
Result('nmo2','nmo1 nmo2','SideBySideAniso')
view('nmo2')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Dix inversion
Step2: Questions
|
7,347
|
<ASSISTANT_TASK:>
Python Code:
import ipywidgets as widgets
import os
image_path = os.path.abspath('../../data_files/trees.jpg')
with open(image_path, 'rb') as f:
raw_image = f.read()
ipyimage = widgets.Image(value=raw_image, format='jpg')
ipyimage
from bqplot import LinearScale, Figure, Lines, Axis, Image
# Create the scales for the image coordinates
scales={'x': LinearScale(), 'y': LinearScale()}
# Define the bqplot Image mark
image = Image(image=ipyimage, scales=scales)
# Create the bqplot Figure to display the mark
fig = Figure(title='Trees', marks=[image], padding_x=0, padding_y=0)
fig
scales = {'x': LinearScale(min=-1, max=2), 'y': LinearScale(min=-0.5, max=2)}
image = Image(image=ipyimage, scales=scales)
lines = Lines(x=[0, 1, 1, 0, 0], y=[0, 0, 1, 1, 0], scales=scales, colors=['red'])
fig = Figure(marks=[image, lines], padding_x=0, padding_y=0, animation_duration=1000)
fig.axes = [Axis(scale=scales['x']), Axis(scale=scales['y'], orientation='vertical')]
fig
# Full screen
image.x = [-1, 2]
image.y = [-.5, 2]
def print_event(_, target):
print(target)
image.on_element_click(print_event)
import bqplot.pyplot as bqp
bqp.figure()
bqp.imshow(image_path, 'filename')
bqp.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Displaying the image inside a bqplot Figure
Step2: Mixing with other marks
Step3: Its traits (attributes) will also respond dynamically to a change from the backend
Step4: Interactions
Step5: Pyplot
|
7,348
|
<ASSISTANT_TASK:>
Python Code:
# This is an example of a Python code cell.
# Note that I can include text as long as I use the # symbol (Python comment)
# Results of my code will display below the input
print(3 + 5)
# We usually want to begin every notebook by setting up our tools:
# graphics in the notebook, rather than in separate windows
%matplotlib inline
# Some standard imports
import numpy as np
import matplotlib.pyplot as plt
# We need the custom climlab package for this assignment
import climlab
# This is a code cell.
# Use the '+' button on the toolbar above to add new cells.
# Use the arrow buttons to reorder cells.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Go ahead and edit the Python code cell above to do something different. To evaluate whatever is in the cell, just press shift-enter.
Step2: Question 1
|
7,349
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
#%config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
import seaborn as sns; sns.set() # prettify matplotlib
import numpy as np
import sklearn.gaussian_process as gp
# local modules
import turbo as tb
import turbo.modules as tm
import turbo.plotting as tp
import turbo.gui.jupyter as tg
# Make deterministic
np.random.seed(0)
xmin, xmax = 0, 6
ymin, ymax = 0, 6
noisy = True
x = np.linspace(xmin, xmax, 100)
y = np.linspace(ymin, ymax, 100)
X, Y = np.meshgrid(x, y)
# np.vectorize is not needed here: the noise term below is array-aware
#@np.vectorize
def f(x, y):
''' from https://github.com/fmfn/BayesianOptimization/issues/18 '''
a = np.exp(-( (x - 2)**2/0.7 + (y - 4)**2/1.2) + (x - 2)*(y - 4)/1.6 )
b = np.exp(-( (x - 4)**2/3 + (y - 2)**2/2.) )
c = np.exp(-( (x - 4)**2/0.5 + (y - 4)**2/0.5) + (x - 4)*(y - 4)/0.5 )
d = np.sin(3.1415 * x)
e = np.exp(-( (x - 5.5)**2/0.5 + (y - 5.5)**2/.5) )
val = 2*a + b - c + 0.17 * d + 2*e
return val + np.random.normal(0, 0.2, size=None if isinstance(x, float) else x.shape) if noisy else val
Z = f(X, Y)
tp.surface_3D(X, Y, Z)
def calc_best():
# 2.06434770773
x = np.linspace(2.0, 2.2, 1000)
y = np.linspace(3.9, 4.1, 1000)
return np.max(f(*np.meshgrid(x, y)))
best_z = calc_best()
bounds = [
('x', xmin, xmax),
('y', ymin, ymax)
]
op = tb.Optimiser(f, 'max', bounds, pre_phase_trials=10, settings_preset='default')
#op.pre_phase_select = tm.LHS_selector(num_total=5)
op.surrogate = tm.SciKitGPSurrogate(model_params=dict(
alpha = 1e-3, # larger => more noise. Default = 1e-10
kernel = 1.0 * gp.kernels.RBF(length_scale_bounds=(1e-2, 5)),
n_restarts_optimizer = 10,
normalize_y = True
))
op.acquisition = tm.UCB(beta=lambda trial_num: np.log(trial_num+1))
tp.plot_acquisition_parameter_function(op.acquisition.beta, 0, 20)
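# UCB balances exploitation and exploration: alpha(x) = mu(x) + beta * sigma(x),
# and with beta = log(trial + 1) the weight on uncertainty grows slowly.
# A tiny standalone illustration (assumed form; turbo's internals may differ):
mu, sigma = np.array([1.0, 0.8]), np.array([0.05, 0.3])
for t in (1, 10, 30):
    beta = np.log(t + 1)
    print(t, mu + beta * sigma)   # the uncertain point wins once beta grows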
rec = tb.Recorder(op)
tg.OptimiserProgressBar(op)
op.run(max_trials=30)
op.get_incumbent()
tp.plot_error(rec, true_best=best_z);
tp.plot_surrogate_hyper_params_2D(rec);
tp.interactive_plot_trial_1D(rec)
tp.interactive_plot_trial_2D(rec, true_objective=f);
ro = tb.Optimiser(f, 'max', bounds=bounds, pre_phase_trials=float('inf'), settings_preset='random_search')
rrec = tb.Recorder(ro)
tg.OptimiserProgressBar(ro)
ro.run(max_trials=100)
ro.get_incumbent()
tp.plot_error(rrec, true_best=best_z, log_scale=False, fig_ax=plt.subplots(figsize=(8, 3)));
tp.interactive_plot_trial_1D(rrec, param='x');
tp.interactive_plot_trial_2D(rrec, x_param='x', y_param='y', true_objective=f);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Target Function
Step2: Helper Functions
Step3: Try optimising the same function with random search
|
7,350
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
keras = tf.keras
def plot_series(time, series, format="-", start=0, end=None, label=None):
plt.plot(time[start:end], series[start:end], format, label=label)
plt.xlabel("Time")
plt.ylabel("Value")
if label:
plt.legend(fontsize=14)
plt.grid(True)
def trend(time, slope=0):
return slope * time
def seasonal_pattern(season_time):
    """Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.4,
np.cos(season_time * 2 * np.pi),
1 / np.exp(3 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
    """Repeats the same pattern at each period"""
season_time = ((time + phase) % period) / period
return amplitude * seasonal_pattern(season_time)
def white_noise(time, noise_level=1, seed=None):
rnd = np.random.RandomState(seed)
return rnd.randn(len(time)) * noise_level
def window_dataset(series, window_size, batch_size=32,
shuffle_buffer=1000):
dataset = tf.data.Dataset.from_tensor_slices(series)
dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))
dataset = dataset.shuffle(shuffle_buffer)
dataset = dataset.map(lambda window: (window[:-1], window[-1]))
dataset = dataset.batch(batch_size).prefetch(1)
return dataset
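# Quick sanity check of the windowing (illustrative, not part of the original
# notebook): each element is a (window, next value) pair.
for X_batch, y_batch in window_dataset(tf.range(10), 3, batch_size=1,
                                       shuffle_buffer=1):
    print(X_batch.numpy(), "->", y_batch.numpy())
    break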
def model_forecast(model, series, window_size):
ds = tf.data.Dataset.from_tensor_slices(series)
ds = ds.window(window_size, shift=1, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(window_size))
ds = ds.batch(32).prefetch(1)
forecast = model.predict(ds)
return forecast
time = np.arange(4 * 365 + 1)
slope = 0.05
baseline = 10
amplitude = 40
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = window_dataset(x_train, window_size, batch_size=128)
model = keras.models.Sequential([
keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),
input_shape=[None]),
keras.layers.SimpleRNN(100, return_sequences=True),
keras.layers.SimpleRNN(100),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200.0)
])
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-7 * 10**(epoch / 20))
optimizer = keras.optimizers.SGD(lr=1e-7, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-7, 1e-4, 0, 30])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = window_dataset(x_train, window_size, batch_size=128)
valid_set = window_dataset(x_valid, window_size, batch_size=128)
model = keras.models.Sequential([
keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),
input_shape=[None]),
keras.layers.SimpleRNN(100, return_sequences=True),
keras.layers.SimpleRNN(100),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200.0)
])
optimizer = keras.optimizers.SGD(lr=1.5e-6, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
early_stopping = keras.callbacks.EarlyStopping(patience=50)
model_checkpoint = keras.callbacks.ModelCheckpoint(
"my_checkpoint", save_best_only=True)
model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping, model_checkpoint])
model = keras.models.load_model("my_checkpoint")
rnn_forecast = model_forecast(
model,
series[split_time - window_size:-1],
window_size)[:, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
def seq2seq_window_dataset(series, window_size, batch_size=32,
shuffle_buffer=1000):
series = tf.expand_dims(series, axis=-1)
ds = tf.data.Dataset.from_tensor_slices(series)
ds = ds.window(window_size + 1, shift=1, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(window_size + 1))
ds = ds.shuffle(shuffle_buffer)
ds = ds.map(lambda w: (w[:-1], w[1:]))
return ds.batch(batch_size).prefetch(1)
for X_batch, Y_batch in seq2seq_window_dataset(tf.range(10), 3,
batch_size=1):
print("X:", X_batch.numpy())
print("Y:", Y_batch.numpy())
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
model = keras.models.Sequential([
keras.layers.SimpleRNN(100, return_sequences=True,
input_shape=[None, 1]),
keras.layers.SimpleRNN(100, return_sequences=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200)
])
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-7 * 10**(epoch / 30))
optimizer = keras.optimizers.SGD(lr=1e-7, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-7, 1e-4, 0, 30])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
valid_set = seq2seq_window_dataset(x_valid, window_size,
batch_size=128)
model = keras.models.Sequential([
keras.layers.SimpleRNN(100, return_sequences=True,
input_shape=[None, 1]),
keras.layers.SimpleRNN(100, return_sequences=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200.0)
])
optimizer = keras.optimizers.SGD(lr=1e-6, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
early_stopping = keras.callbacks.EarlyStopping(patience=10)
model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping])
rnn_forecast = model_forecast(model, series[..., np.newaxis], window_size)
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Forecasting with an RNN
Step4: Simple RNN Forecasting
Step5: Sequence-to-Sequence Forecasting
|
7,351
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import gpflow
import gpflowopt
import numpy as np
# Objective
def vlmop2(x):
transl = 1 / np.sqrt(2)
part1 = (x[:, [0]] - transl) ** 2 + (x[:, [1]] - transl) ** 2
part2 = (x[:, [0]] + transl) ** 2 + (x[:, [1]] + transl) ** 2
y1 = 1 - np.exp(-1 * part1)
y2 = 1 - np.exp(-1 * part2)
return np.hstack((y1, y2))
# Setup input domain
domain = gpflowopt.domain.ContinuousParameter('x1', -2, 2) + \
gpflowopt.domain.ContinuousParameter('x2', -2, 2)
# Plot
def plotfx():
X = gpflowopt.design.FactorialDesign(101, domain).generate()
Z = vlmop2(X)
shape = (101, 101)
axes = []
plt.figure(figsize=(15, 5))
for i in range(Z.shape[1]):
axes = axes + [plt.subplot2grid((1, 2), (0, i))]
axes[-1].contourf(X[:,0].reshape(shape), X[:,1].reshape(shape), Z[:,i].reshape(shape))
axes[-1].set_title('Objective {}'.format(i+1))
axes[-1].set_xlabel('x1')
axes[-1].set_ylabel('x2')
axes[-1].set_xlim([domain.lower[0], domain.upper[0]])
axes[-1].set_ylim([domain.lower[1], domain.upper[1]])
return axes
plotfx();
# Initial evaluations
design = gpflowopt.design.LatinHyperCube(11, domain)
X = design.generate()
Y = vlmop2(X)
# One model for each objective
objective_models = [gpflow.gpr.GPR(X.copy(), Y[:,[i]].copy(), gpflow.kernels.Matern52(2, ARD=True)) for i in range(Y.shape[1])]
for model in objective_models:
model.likelihood.variance = 0.01
hvpoi = gpflowopt.acquisition.HVProbabilityOfImprovement(objective_models)
# First setup the optimization strategy for the acquisition function
# Combining MC step followed by L-BFGS-B
acquisition_opt = gpflowopt.optim.StagedOptimizer([gpflowopt.optim.MCOptimizer(domain, 1000),
gpflowopt.optim.SciPyOptimizer(domain)])
# Then run the BayesianOptimizer for 20 iterations
optimizer = gpflowopt.BayesianOptimizer(domain, hvpoi, optimizer=acquisition_opt, verbose=True)
result = optimizer.optimize([vlmop2], n_iter=20)
print(result)
print(optimizer.acquisition.pareto.front.value)
def plot():
grid_size = 51 # 101
shape = (grid_size, grid_size)
Xeval = gpflowopt.design.FactorialDesign(grid_size, domain).generate()
Yeval_1, _ = hvpoi.models[0].predict_f(Xeval)
Yeval_2, _ = hvpoi.models[1].predict_f(Xeval)
Yevalc = hvpoi.evaluate(Xeval)
plots = [((0,0), 1, 1, 'Objective 1 model', Yeval_1[:,0]),
((0,1), 1, 1, 'Objective 2 model', Yeval_2[:,0]),
((1,0), 2, 2, 'hypervolume-based PoI', Yevalc)]
plt.figure(figsize=(7,7))
for i, (plot_pos, plot_rowspan, plot_colspan, plot_title, plot_data) in enumerate(plots):
data = hvpoi.data[0]
ax = plt.subplot2grid((3, 2), plot_pos, rowspan=plot_rowspan, colspan=plot_colspan)
ax.contourf(Xeval[:,0].reshape(shape), Xeval[:,1].reshape(shape), plot_data.reshape(shape))
ax.scatter(data[:,0], data[:,1], c='w')
ax.set_title(plot_title)
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_xlim([domain.lower[0], domain.upper[0]])
ax.set_ylim([domain.lower[1], domain.upper[1]])
plt.tight_layout()
# Plot representing the model belief, and the belief mapped to EI and PoF
plot()
for model in objective_models:
print(model)
# plot pareto front
plt.figure(figsize=(9, 4))
R = np.array([1.5, 1.5])
print('R:', R)
hv = hvpoi.pareto.hypervolume(R)
print('Hypervolume indicator:', hv)
plt.figure(figsize=(7, 7))
pf, dom = gpflowopt.pareto.non_dominated_sort(hvpoi.data[1])
plt.scatter(hvpoi.data[1][:,0], hvpoi.data[1][:,1], c=dom)
plt.title('Pareto set')
plt.xlabel('Objective 1')
plt.ylabel('Objective 2')
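# For intuition: in 2-D the hypervolume is the area dominated by the front and
# bounded by the reference point R. A hand-rolled check on a toy front
# (illustrative only; gpflowopt's pareto.hypervolume is the real implementation):
front = np.array([[0.2, 0.8], [0.5, 0.5], [0.8, 0.2]])  # sorted by objective 1
ref = np.array([1.0, 1.0])
area, prev_y = 0.0, ref[1]
for x, y in front:
    area += (ref[0] - x) * (prev_y - y)
    prev_y = y
print('toy hypervolume:', area)  # 0.37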
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We set up the Van Veldhuizen and Lamont multiobjective optimization problem 2 (vlmop2). The objectives of vlmop2 are very easy to model, making it ideal for illustrating Bayesian multiobjective optimization.
Step2: Multiobjective acquisition function
Step3: Running the Bayesian optimizer
Step4: For multiple objectives the returned OptimizeResult object contains the identified Pareto set instead of just a single optimum. Note that this is computed on the raw data Y.
Step5: Finally, we can extract and plot the Pareto front ourselves using the pareto.non_dominated_sort function on the final data matrix Y.
|
7,352
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.DataFrame({'str': ['Aa', 'Bb', '?? ?', '###', '{}xxa;']})
def g(df):
df["new"] = df.apply(lambda p: sum(q.isalpha() for q in p["str"] ), axis=1)
return df
df = g(df.copy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,353
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
data = pd.read_csv('data/hki_liikennemaarat.csv', encoding='latin-1',delimiter=';')
data.head()
laru = data[data.nimi == 'LAUTTASAAREN SILTA']
laru = laru.loc[:,['suunta','aika','vuosi','autot','ha','pa','ka','ra','la','mp']]
laru['tunti'] = (laru['aika'] / 100).apply(np.floor)
laru
laru['light traffic'] = laru['ha'] + laru['pa'] + laru['mp']
laru['heavy traffic'] = laru['ka'] + laru['ra'] + laru['la']
laru['buses'] = laru['la']
laru.head()
laru = laru.drop(['autot','ha','pa','ka','ra','la','mp'],axis=1)
laru.head()
yrl = laru.groupby(['vuosi','suunta','tunti']).sum()
yrl.reset_index(inplace=True)
yrl
y_2016_tohel = yrl[(yrl.vuosi == 2016) & (yrl.suunta == 1.0) ]
y_2016_tolaru = yrl[(yrl.vuosi == 2016) & (yrl.suunta == 2.0) ]
y_2016_tohel = y_2016_tohel.drop(['aika','suunta','vuosi'],axis = 1)
y_2016_tolaru = y_2016_tolaru.drop(['aika','suunta','vuosi'],axis = 1)
y_2016_tohel.head()
y_2016_tolaru.head()
y_2016_tohel['time'] = y_2016_tohel['tunti'].apply(lambda x: pd.to_timedelta(x, unit='h'))
y_2016_tolaru['time'] = y_2016_tolaru['tunti'].apply(lambda x: pd.to_timedelta(x, unit='h'))
y_2016_tolaru = y_2016_tolaru.set_index('time')
y_2016_tohel = y_2016_tohel.set_index('time')
y_2016_tohel
y_2016_tohel = y_2016_tohel.drop(['tunti'], axis = 1)
y_2016_tolaru = y_2016_tolaru.drop(['tunti'], axis = 1)
y_2016_tolaru
xinterval = pd.date_range('1/1/2011', periods=24, freq='H').time
plt.figure(figsize=(23,13))
plt.xticks(xinterval,rotation=45)
plt.grid(True)
plt.title('Traffic towards Lauttasaari. Representative sample from 2016, 1 hour sample interval')
plt.plot(xinterval,y_2016_tolaru['light traffic']);
plt.plot(xinterval,y_2016_tolaru['heavy traffic']);
plt.plot(xinterval,y_2016_tolaru['buses']);
plt.legend(['light traffic','heavy traffic','buses']);
plt.xlabel('Time');
plt.ylabel('Vehicles / hour');
plt.figure(figsize=(23,13))
plt.xticks(xinterval,rotation=45)
plt.grid(True)
plt.title('Traffic towards Helsinki. Representative sample from 2016, 1 hour sample interval')
plt.plot(xinterval,y_2016_tohel['light traffic']);
plt.plot(xinterval,y_2016_tohel['heavy traffic']);
plt.plot(xinterval,y_2016_tohel['buses']);
plt.legend(['light traffic','heavy traffic','buses']);
plt.xlabel('Time');
plt.ylabel('Vehicles / hour');
y_2016_tohel.to_csv('data/dist_larubridge_tohel_16.csv')
y_2016_tolaru.to_csv('data/dist_larubridge_tolaru_16.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First download the dataset and place it in the data/ folder
Step2: The columns are
Step3: Since the time series is at uneven intervals, some reductions have to be made.
Step4: Let's define light as
|
7,354
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image, HTML, display
assert True # leave this to grade the import statements
Image(url='http://www.elevationnetworks.org/wp-content/uploads/2013/05/physics.jpeg', embed=True, width=600, height=600)
assert True # leave this to grade the image display
q = """<table>
<tr>
<th>Name</th>
<th>Symbol</th>
<th>Antiparticle</th>
<th>Charge (e)</th>
<th>Mass (MeV/c$^2$)</th>
</tr>
<tr>
<td>down</td>
<td>d</td>
<td>$\\bar{\\textrm{d}}$</td>
<td>$-\\frac{1}{3}$</td>
<td>3.5-6.0</td>
</tr>
<tr>
<td>bottom</td>
<td>b</td>
<td>$\\bar{\\textrm{b}}$</td>
<td>$-\\frac{1}{3}$</td>
<td>4,130-4,370</td>
</tr>
<tr>
<td>strange</td>
<td>s</td>
<td>$\\bar{\\textrm{s}}$</td>
<td>$-\\frac{1}{3}$</td>
<td>70-130</td>
</tr>
<tr>
<td>charm</td>
<td>c</td>
<td>$\\bar{\\textrm{c}}$</td>
<td>$+\\frac{2}{3}$</td>
<td>1,160-1,340</td>
</tr>
<tr>
<td>up</td>
<td>u</td>
<td>$\\bar{\\textrm{u}}$</td>
<td>$+\\frac{2}{3}$</td>
<td>1.5-3.3</td>
</tr>
<tr>
<td>top</td>
<td>t</td>
<td>$\\bar{\\textrm{t}}$</td>
<td>$+\\frac{2}{3}$</td>
<td>169,100-173,300</td>
  </tr>
</table>"""
display(HTML(q))
assert True # leave this here to grade the quark table
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic rich display
Step3: Use the HTML object to display HTML in the notebook that reproduces the table of Quarks on this page. This will require you to learn about how to create HTML tables and then pass that to the HTML object for display. Don't worry about styling and formatting the table, but you should use LaTeX where appropriate.
|
7,355
|
<ASSISTANT_TASK:>
Python Code:
%%bash
echo committees
ls -lah ../data/committees/dist/dist/committees | wc -l
echo factions
ls -lah ../data/committees/dist/dist/factions | wc -l
echo meetings
ls -lah ../data/committees/dist/dist/meetings/*/* | wc -l
echo members
ls -lah ../data/committees/dist/dist/members | wc -l
!{'cd /pipelines; dpp run --verbose ./knesset/generate-sitemap'}
%%bash
echo number of committees: `cat ../data/committees/dist/dist/sitemap.txt | grep committees | wc -l`
echo first 10 committees:
cat ../data/committees/dist/dist/sitemap.txt | grep committees | head
echo number of meetings: `cat ../data/committees/dist/dist/sitemap.txt | grep meetings | wc -l`
echo first 10 meetings:
cat ../data/committees/dist/dist/sitemap.txt | grep meetings | head
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run the generate-sitemap pipeline
Step2: View the sitemap
|
7,356
|
<ASSISTANT_TASK:>
Python Code:
import math

def calculateSum(n):
    # For n = sqrt(2) this is 2 * (8 + 60 + 30 + 1) = 198
    return 2 * (pow(n, 6) + 15 * pow(n, 4) + 15 * pow(n, 2) + 1)

if __name__ == '__main__':
    n = 1.4142
    print(math.ceil(calculateSum(n)))  # 198
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,357
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
pd.set_option('max_rows', 10)
c = pd.Categorical(['a', 'b', 'b', 'c', 'a', 'b', 'a', 'a', 'a', 'a'])
c
c.describe()
c.codes
c.categories
c.as_ordered()
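# The codes/categories pair fully determines a categorical; you can rebuild
# one from them (a quick illustration, not part of the original tutorial):
pd.Categorical.from_codes(c.codes, c.categories)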
dta = pd.DataFrame.from_dict({'factor': c,
'x': np.random.randn(10)})
dta.head()
dta.dtypes
dta.factor.cat
dta.factor.cat.categories
dta.factor.describe()
# [Solution Here]
%load solutions/load_nfs_categorical.py
dates = pd.date_range("1/1/2015", periods=75, freq="D")
dates
y = pd.Series(np.random.randn(75), index=dates)
y.head()
y.reset_index().dtypes
dta = (y.reset_index(name='t').
rename(columns={'index': 'y'}))
dta.head()
dta.dtypes
dta.y.dt.freq
dta.y.dt.day
y.loc["2015-01-01":"2015-01-15"]
y["2015-01"]
resample = y.resample("M")
resample.mean()
y.asfreq('H', method='ffill')
y
y.shift(1)
y.shift(-1)
ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000',
periods=1000))
ts = ts.cumsum()
rolling = ts.rolling(window=60)
rolling
rolling.mean()
# [Solution here]
%load solutions/load_nfs_datetime.py
# this is a bit slow because of the date parsing
transit = pd.read_csv("../data/AIS/transit_segments.csv",
parse_dates=['st_time', 'end_time'],
infer_datetime_format=True)
vessels = pd.read_csv("../data/AIS/vessel_information.csv")
vessels.head()
transit.head()
vessels.columns.intersection(transit.columns)
transit.merge(vessels).head()
A = pd.DataFrame(np.random.randn(25, 2),
index=pd.date_range('1/1/2015', periods=25))
A[2] = np.repeat(list('abcde'), 5)
A
B = pd.DataFrame(np.random.randn(5, 2))
B[2] = list('abcde')
B
A.merge(B, on=2)
transit.set_index('mmsi', inplace=True)
vessels.set_index('mmsi', inplace=True)
transit.join(vessels).head()
%load solutions/join_nfs.py
df1 = pd.read_csv('../data/ebola/guinea_data/2014-08-04.csv',
index_col=['Date', 'Description'])
df2 = pd.read_csv('../data/ebola/guinea_data/2014-08-26.csv',
index_col=['Date', 'Description'])
df1.shape
df2.shape
df1.head()
df2.head()
df1.index.is_unique
df2.index.is_unique
df = pd.concat((df1, df2), axis=0)
df.shape
# [Solution here]
%load solutions/concat_nfs.py
vessels.type
vessels.type.str.count('/').max()
vessels.type.str.split('/', expand=True)
# [Solution here]
%load solutions/nfs_dairy.py
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Categorical Types
Step2: By default the Categorical type represents an unordered categorical
Step3: Support in DataFrames
Step4: Exercise
Step5: Date and Time Types
Step6: Support in DataFrames
Step7: Indexing with Dates
Step8: DatetimeIndex supports partial string indexing
Step9: You can resample to a lower frequency, specifying how to aggregate
Step10: Or go to a higher frequency, optionally specifying how to fill in the
Step11: There are convenience methods to lag and lead time series
Step12: Rolling and Window Functions
Step13: Exercise
Step14: Merging and Joining DataFrames
Step15: A lot of the time data that comes from relational databases will be normalized
Step16: Several ships in the vessels data have traveled multiple segments as we would expect
Step17: Merging
Step18: Watch out, when merging on columns, indices are discarded
Step19: Joins
Step20: Exercise
Step21: Concatenation
Step22: We can concatenate on the rows
Step23: Exercise
Step24: Text Data Manipulation
Step25: Count the vessel separators
Step26: Split on these accessors and expand to return a DataFrame with nan-padding
Step27: Exercise
|
7,358
|
<ASSISTANT_TASK:>
Python Code:
import os
import pandas as pd
from oemof.solph import (Sink, Source, Transformer, Bus, Flow, Model,
EnergySystem)
import oemof.outputlib as outputlib
import pickle
solver = 'cbc'
# initialize and provide data
datetimeindex = pd.date_range('1/1/2016', periods=24*10, freq='H')
energysystem = EnergySystem(timeindex=datetimeindex)
filename = 'input_data.csv'
data = pd.read_csv(filename, sep=",")
# resource buses
bcoal = Bus(label='coal', balanced=False)
bgas = Bus(label='gas', balanced=False)
boil = Bus(label='oil', balanced=False)
blig = Bus(label='lignite', balanced=False)
# electricity and heat
bel = Bus(label='bel')
bth = Bus(label='bth')
energysystem.add(bcoal, bgas, boil, blig, bel, bth)
# an excess and a shortage variable can help to avoid infeasible problems
energysystem.add(Sink(label='excess_el', inputs={bel: Flow()}))
# shortage_el = Source(label='shortage_el',
# outputs={bel: Flow(variable_costs=200)})
# sources
energysystem.add(Source(label='wind', outputs={bel: Flow(
actual_value=data['wind'], nominal_value=66.3, fixed=True)}))
energysystem.add(Source(label='pv', outputs={bel: Flow(
actual_value=data['pv'], nominal_value=65.3, fixed=True)}))
# demands (electricity/heat)
energysystem.add(Sink(label='demand_el', inputs={bel: Flow(
nominal_value=85, actual_value=data['demand_el'], fixed=True)}))
energysystem.add(Sink(label='demand_th',
inputs={bth: Flow(nominal_value=40,
actual_value=data['demand_th'],
fixed=True)}))
# power plants
energysystem.add(Transformer(
label='pp_coal',
inputs={bcoal: Flow()},
outputs={bel: Flow(nominal_value=20.2, variable_costs=25)},
conversion_factors={bel: 0.39}))
energysystem.add(Transformer(
label='pp_lig',
inputs={blig: Flow()},
outputs={bel: Flow(nominal_value=11.8, variable_costs=19)},
conversion_factors={bel: 0.41}))
energysystem.add(Transformer(
label='pp_gas',
inputs={bgas: Flow()},
outputs={bel: Flow(nominal_value=41, variable_costs=40)},
conversion_factors={bel: 0.50}))
energysystem.add(Transformer(
label='pp_oil',
inputs={boil: Flow()},
outputs={bel: Flow(nominal_value=5, variable_costs=50)},
conversion_factors={bel: 0.28}))
# combined heat and power plant (chp)
energysystem.add(Transformer(
label='pp_chp',
inputs={bgas: Flow()},
outputs={bel: Flow(nominal_value=30, variable_costs=42),
bth: Flow(nominal_value=40)},
conversion_factors={bel: 0.3, bth: 0.4}))
# heat pump with a coefficient of performance (COP) of 3
b_heat_source = Bus(label='b_heat_source')
energysystem.add(b_heat_source)
energysystem.add(Source(label='heat_source', outputs={b_heat_source: Flow()}))
cop = 3
energysystem.add(Transformer(
label='heat_pump',
inputs={bel: Flow(),
b_heat_source: Flow()},
outputs={bth: Flow(nominal_value=10)},
    conversion_factors={bel: 1/cop, b_heat_source: (cop-1)/cop}))
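# Energy balance of the heat pump above (illustrative, COP = 3): delivering
# 9 units of heat takes 9 * (1/3) = 3 units of electricity plus
# 9 * (2/3) = 6 units of ambient heat, matching the conversion factors.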
# create optimization model based on energy_system
optimization_model = Model(energysystem=energysystem)
# solve problem
optimization_model.solve(solver=solver,
solve_kwargs={'tee': True, 'keepfiles': False})
energysystem.results['main'] = outputlib.processing.results(optimization_model)
energysystem.results['meta'] = outputlib.processing.meta_results(optimization_model)
string_results = outputlib.views.convert_keys_to_strings(energysystem.results['main'])
energysystem.dump(dpath=None, filename=None)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Specify solver
Step2: Create an energy system and optimize the dispatch at least costs.
Step3: Create and add components to energysystem
Step4: Optimization
Step5: Write results into energysystem.results object for later
Step6: Save results - Dump the energysystem (to ~/home/user/.oemof by default)
|
7,359
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
id=["Train A","Train A","Train A","Train B","Train B","Train B"]
arrival_time = ["0", "2016-05-19 13:50:00", "2016-05-19 21:25:00", "0", "2016-05-24 18:30:00", "2016-05-26 12:15:00"]
departure_time = ["2016-05-19 08:25:00","2016-05-19 16:00:00","2016-05-20 07:45:00","2016-05-24 12:50:00","2016-05-25 23:00:00","2016-05-26 19:45:00"]
df = pd.DataFrame({'id': id, 'arrival_time':arrival_time, 'departure_time':departure_time})
import numpy as np
def g(df):
    # '0' marks a missing first arrival; treat it as NaT
    df['arrival_time'] = pd.to_datetime(df['arrival_time'].replace('0', np.nan))
    df['departure_time'] = pd.to_datetime(df['departure_time'])
    # Duration = arrival minus the previous departure of the same train, in seconds
    df['Duration'] = (df['arrival_time'] - df.groupby('id')['departure_time'].shift()).dt.total_seconds()
    return df
df = g(df.copy())
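# Sanity check (illustrative): the first row of each train has no previous
# departure, so Duration is NaN; for Train A's second row the gap is
# 13:50 - 08:25 on 2016-05-19, i.e. 19500.0 seconds.
print(df)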
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,360
|
<ASSISTANT_TASK:>
Python Code:
raw_corpus = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
# A stoplist of frequent words could be used to filter tokens here;
# it is left disabled, so documents are only lowercased and split on whitespace
# stoplist = set('for a of the and to in'.split(' '))
texts = [[word for word in document.lower().split()] # if word not in stoplist]
for document in raw_corpus]
# Count word frequencies
from collections import defaultdict
frequency = defaultdict(int)
for text in texts:
for token in text:
frequency[token] += 1
# Only keep words that appear more than once
processed_corpus = [[token for token in text if frequency[token] > 1] for text in texts]
processed_corpus
from gensim import corpora
dictionary = corpora.Dictionary(processed_corpus)
print(dictionary)
print(dictionary.token2id)
new_doc = "Human computer interaction"
new_vec = dictionary.doc2bow(new_doc.lower().split())
new_vec
bow_corpus = [dictionary.doc2bow(text) for text in processed_corpus]
bow_corpus
from gensim import models
# train the model
tfidf = models.TfidfModel(bow_corpus)
# transform the "system minors" string
tfidf[dictionary.doc2bow("system minors".lower().split())]
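# For intuition: gensim's default weight is tf * log2(N/df), L2-normalised per
# document. "system" occurs in 3 of the 9 documents and "minors" in 2, so
# (hand-computed, assuming those document frequencies) the weights should be:
import numpy as np
raw = np.array([np.log2(9 / 3), np.log2(9 / 2)])  # idf of "system", "minors"
print(raw / np.linalg.norm(raw))                  # ~[0.59, 0.81]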
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This is a particularly small example of a corpus for illustration purposes. Another example could be a list of all the plays written by Shakespeare, list of all wikipedia articles, or all tweets by a particular person of interest.
Step2: Before proceeding, we want to associate each word in the corpus with a unique integer ID. We can do this using the gensim.corpora.Dictionary class. This dictionary defines the vocabulary of all words that our processing knows about.
Step3: Because our corpus is small, there are only 12 different tokens in this Dictionary. For larger corpora, dictionaries that contain hundreds of thousands of tokens are quite common.
Step4: For example, suppose we wanted to vectorize the phrase "Human computer interaction" (note that this phrase was not in our original corpus). We can create the bag-of-word representation for a document using the doc2bow method of the dictionary, which returns a sparse representation of the word counts
Step5: The first entry in each tuple corresponds to the ID of the token in the dictionary, the second corresponds to the count of this token.
Step6: Note that while this list lives entirely in memory, in most applications you will want a more scalable solution. Luckily, gensim allows you to use any iterator that returns a single document vector at a time. See the documentation for more details.
|
7,361
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from typing import Tuple
from scipy import special
from sklearn import metrics
import tensorflow as tf
import tensorflow_datasets as tfds
# Set verbosity.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from sklearn.exceptions import ConvergenceWarning
import warnings
warnings.simplefilter(action="ignore", category=ConvergenceWarning)
warnings.simplefilter(action="ignore", category=FutureWarning)
!pip install tensorflow_privacy
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import membership_inference_attack as mia
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackInputData
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackResultsCollection
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import PrivacyMetric
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import PrivacyReportMetadata
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import SlicingSpec
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import privacy_report
import tensorflow_privacy
dataset = 'cifar10'
num_classes = 10
activation = 'relu'
num_conv = 3
batch_size=50
epochs_per_report = 2
total_epochs = 50
lr = 0.001
#@title
print('Loading the dataset.')
train_ds = tfds.as_numpy(
tfds.load(dataset, split=tfds.Split.TRAIN, batch_size=-1))
test_ds = tfds.as_numpy(
tfds.load(dataset, split=tfds.Split.TEST, batch_size=-1))
x_train = train_ds['image'].astype('float32') / 255.
y_train_indices = train_ds['label'][:, np.newaxis]
x_test = test_ds['image'].astype('float32') / 255.
y_test_indices = test_ds['label'][:, np.newaxis]
# Convert class vectors to binary class matrices.
y_train = tf.keras.utils.to_categorical(y_train_indices, num_classes)
y_test = tf.keras.utils.to_categorical(y_test_indices, num_classes)
input_shape = x_train.shape[1:]
assert x_train.shape[0] % batch_size == 0, "The tensorflow_privacy optimizer doesn't handle partial batches"
#@title
def small_cnn(input_shape: Tuple[int],
num_classes: int,
num_conv: int,
activation: str = 'relu') -> tf.keras.models.Sequential:
    """Setup a small CNN for image classification.

    Args:
      input_shape: Integer tuple for the shape of the images.
      num_classes: Number of prediction classes.
      num_conv: Number of convolutional layers.
      activation: The activation function to use for conv and dense layers.

    Returns:
      The Keras model.
    """
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Input(shape=input_shape))
# Conv layers
for _ in range(num_conv):
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation=activation))
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(64, activation=activation))
model.add(tf.keras.layers.Dense(num_classes))
model.compile(
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
metrics=['accuracy'])
return model
model_2layers = small_cnn(
input_shape, num_classes, num_conv=2, activation=activation)
model_3layers = small_cnn(
input_shape, num_classes, num_conv=3, activation=activation)
class PrivacyMetrics(tf.keras.callbacks.Callback):
def __init__(self, epochs_per_report, model_name):
self.epochs_per_report = epochs_per_report
self.model_name = model_name
self.attack_results = []
def on_epoch_end(self, epoch, logs=None):
epoch = epoch+1
if epoch % self.epochs_per_report != 0:
return
print(f'\nRunning privacy report for epoch: {epoch}\n')
logits_train = self.model.predict(x_train, batch_size=batch_size)
logits_test = self.model.predict(x_test, batch_size=batch_size)
prob_train = special.softmax(logits_train, axis=1)
prob_test = special.softmax(logits_test, axis=1)
# Add metadata to generate a privacy report.
privacy_report_metadata = PrivacyReportMetadata(
# Show the validation accuracy on the plot
# It's what you send to train_accuracy that gets plotted.
accuracy_train=logs['val_accuracy'],
accuracy_test=logs['val_accuracy'],
epoch_num=epoch,
model_variant_label=self.model_name)
attack_results = mia.run_attacks(
AttackInputData(
labels_train=y_train_indices[:, 0],
labels_test=y_test_indices[:, 0],
probs_train=prob_train,
probs_test=prob_test),
SlicingSpec(entire_dataset=True, by_class=True),
attack_types=(AttackType.THRESHOLD_ATTACK,
AttackType.LOGISTIC_REGRESSION),
privacy_report_metadata=privacy_report_metadata)
self.attack_results.append(attack_results)
all_reports = []
callback = PrivacyMetrics(epochs_per_report, "2 Layers")
history = model_2layers.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=total_epochs,
validation_data=(x_test, y_test),
callbacks=[callback],
shuffle=True)
all_reports.extend(callback.attack_results)
callback = PrivacyMetrics(epochs_per_report, "3 Layers")
history = model_3layers.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=total_epochs,
validation_data=(x_test, y_test),
callbacks=[callback],
shuffle=True)
all_reports.extend(callback.attack_results)
results = AttackResultsCollection(all_reports)
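# The metrics plotted below: AUC of the membership-inference classifier and
# attacker advantage, i.e. the maximum over thresholds of (TPR - FPR).
# AUC ~ 0.5 and advantage ~ 0 mean the attack is no better than random guessing.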
privacy_metrics = (PrivacyMetric.AUC, PrivacyMetric.ATTACKER_ADVANTAGE)
epoch_plot = privacy_report.plot_by_epochs(
results, privacy_metrics=privacy_metrics)
privacy_metrics = (PrivacyMetric.AUC, PrivacyMetric.ATTACKER_ADVANTAGE)
utility_privacy_plot = privacy_report.plot_privacy_vs_accuracy(
results, privacy_metrics=privacy_metrics)
for axis in utility_privacy_plot.axes:
axis.set_xlabel('Validation accuracy')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Assess privacy risks with the TensorFlow Privacy Report
Step2: Install TensorFlow Privacy.
Step3: Train two models, with privacy metrics
Step4: Next, load the dataset. There's nothing privacy-specific in this code.
Step6: Next define a function to build the models.
Step7: Build two CNN models using that function, one with two and one with three convolutional layers.
Step8: Define a callback to collect privacy metrics
Step9: Train the models
Step10: Epoch Plots
Step11: Note that, as a rule, privacy vulnerability tends to increase as the number of epochs goes up. This is true across model variants as well as different attacker types.
|
7,362
|
<ASSISTANT_TASK:>
Python Code:
from infomap import infomap
infomapWrapper = infomap.Infomap("--two-level")
# Add link weight as an optional third argument
infomapWrapper.addLink(0, 1)
infomapWrapper.addLink(0, 2)
infomapWrapper.addLink(0, 3)
infomapWrapper.addLink(1, 0)
infomapWrapper.addLink(1, 2)
infomapWrapper.addLink(2, 1)
infomapWrapper.addLink(2, 0)
infomapWrapper.addLink(3, 0)
infomapWrapper.addLink(3, 4)
infomapWrapper.addLink(3, 5)
infomapWrapper.addLink(4, 3)
infomapWrapper.addLink(4, 5)
infomapWrapper.addLink(5, 4)
infomapWrapper.addLink(5, 3)
infomapWrapper.run()
tree = infomapWrapper.tree
print("Found %d modules with codelength: %f" % (tree.numTopModules(), tree.codelength()))
print("\n#node module")
for node in tree.leafIter():
print("%d %d" % (node.physIndex, node.moduleIndex()))
infomapWrapper = infomap.MemInfomap("--two-level")
# A trigram represents a path from node A through B to C.
# Add link weight as an optional fourth argument
infomapWrapper.addTrigram(0, 2, 0)
infomapWrapper.addTrigram(0, 2, 1)
infomapWrapper.addTrigram(1, 2, 1)
infomapWrapper.addTrigram(1, 2, 0)
infomapWrapper.addTrigram(1, 2, 3)
infomapWrapper.addTrigram(3, 2, 3)
infomapWrapper.addTrigram(2, 3, 4)
infomapWrapper.addTrigram(3, 2, 4)
infomapWrapper.addTrigram(4, 2, 4)
infomapWrapper.addTrigram(4, 2, 3)
infomapWrapper.addTrigram(4, 3, 3)
infomapWrapper.run()
tree = infomapWrapper.tree
print("Found %d modules with codelength: %f" % (tree.numTopModules(), tree.codelength()))
print("\n#node module")
for node in tree.leafIter():
print("%d %d" % (node.physIndex, node.moduleIndex()))
# Store expanded state network
infomapWrapper = infomap.MemInfomap("--two-level --expanded")
infomapWrapper.addTrigram(0, 2, 0)
infomapWrapper.addTrigram(0, 2, 1)
infomapWrapper.addTrigram(1, 2, 1)
infomapWrapper.addTrigram(1, 2, 0)
infomapWrapper.addTrigram(1, 2, 3)
infomapWrapper.addTrigram(3, 2, 3)
infomapWrapper.addTrigram(2, 3, 4)
infomapWrapper.addTrigram(3, 2, 4)
infomapWrapper.addTrigram(4, 2, 4)
infomapWrapper.addTrigram(4, 2, 3)
infomapWrapper.addTrigram(4, 3, 3)
infomapWrapper.run()
tree = infomapWrapper.tree
print("Found %d modules with codelength: %f" % (tree.numTopModules(), tree.codelength()))
print("\n#previousNode node module")
for node in tree.leafIter():
print("%d %d %d" % (node.stateIndex, node.physIndex, node.moduleIndex()))
infomapWrapper = infomap.MemInfomap("--two-level --expanded")
# from (layer, node) to (layer, node) weight
infomapWrapper.addMultiplexLink(2, 1, 1, 2, 1.0)
infomapWrapper.addMultiplexLink(1, 2, 2, 1, 1.0)
infomapWrapper.addMultiplexLink(3, 2, 2, 3, 1.0)
infomapWrapper.run()
tree = infomapWrapper.tree
print("Found %d modules with codelength: %f" % (tree.numTopModules(), tree.codelength()))
print("\n#layer node module:")
for node in tree.leafIter():
print("%d %d %d" % (node.stateIndex, node.physIndex, node.moduleIndex()))
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.colors as colors
%matplotlib inline
def findCommunities(G):
    """Partition the network with the Infomap algorithm.

    Annotates nodes with a 'community' id and returns the number of communities found.
    """
infomapWrapper = infomap.Infomap("--two-level --silent")
print("Building Infomap network from a NetworkX graph...")
for e in G.edges_iter():
infomapWrapper.addLink(*e)
print("Find communities with Infomap...")
infomapWrapper.run();
tree = infomapWrapper.tree
print("Found %d modules with codelength: %f" % (tree.numTopModules(), tree.codelength()))
communities = {}
for node in tree.leafIter():
communities[node.originalLeafIndex] = node.moduleIndex()
nx.set_node_attributes(G, 'community', communities)
return tree.numTopModules()
def drawNetwork(G):
# position map
pos = nx.spring_layout(G)
# community ids
communities = [v for k,v in nx.get_node_attributes(G, 'community').items()]
numCommunities = max(communities) + 1
# color map from http://colorbrewer2.org/
cmapLight = colors.ListedColormap(['#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6'], 'indexed', numCommunities)
cmapDark = colors.ListedColormap(['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a'], 'indexed', numCommunities)
# Draw edges
nx.draw_networkx_edges(G, pos)
# Draw nodes
nodeCollection = nx.draw_networkx_nodes(G,
pos = pos,
node_color = communities,
cmap = cmapLight
)
# Set node border color to the darker shade
darkColors = [cmapDark(v) for v in communities]
nodeCollection.set_edgecolor(darkColors)
# Draw node labels
for n in G.nodes_iter():
plt.annotate(n,
xy = pos[n],
textcoords = 'offset points',
horizontalalignment = 'center',
verticalalignment = 'center',
xytext = [0, 0],
color = cmapDark(communities[n])
)
plt.axis('off')
# plt.savefig("karate.png")
plt.show()
G=nx.karate_club_graph()
findCommunities(G)
drawNetwork(G)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple example
Step2: Memory networks
Step3: Overlapping modules
Step4: As seen in the expanded output above, node 2 is represented by four state nodes partitioned into two modules depending on where you came from: if you go to node 2 from node 0 or 1 you are still considered to be in module 1, but if you go to node 2 from node 3 or 4 you are still considered to be in module 0.
Step6: Infomap + NetworkX
|
7,363
|
<ASSISTANT_TASK:>
Python Code:
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
view_sentence_range = (0, 10)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
from collections import Counter
def create_lookup_tables(text):
"""Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)"""
# TODO: Implement Function
counts = Counter(text)
vocab = sorted(counts, key=counts.get, reverse=True)
vocab_to_int = { w : i for i, w in enumerate(vocab, 0)}
int_to_vocab = dict(enumerate(vocab))
return vocab_to_int, int_to_vocab
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_create_lookup_tables(create_lookup_tables)
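# Quick sanity check on a hypothetical mini-corpus: ids are assigned by
# descending word frequency, and the two dicts are inverses of each other.
v2i, i2v = create_lookup_tables(['the', 'cat', 'sat', 'on', 'the', 'mat'])
assert i2v[v2i['the']] == 'the'
assert v2i['the'] == 0  # 'the' is the most frequent word, so it gets id 0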
def token_lookup():
"""Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token"""
# TODO: Implement Function
token_dict = {'.' : "||Period||", ',' : "||Comma||", '"' : "||Quotation_Mark||",\
';' : "||Semicolon||", '!': "||Exclamation_Mark||", '?': "||Question_Mark||", \
'(' : "||Left_Parentheses||", ')' : "||Right_Parentheses||", '--' : "||Dash||", '\n' : "||Return||"}
return token_dict
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_tokenize(token_lookup)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
"""DON'T MODIFY ANYTHING IN THIS CELL"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
"""Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)"""
# TODO: Implement Function
inputs_ = tf.placeholder(tf.int32, shape=[None, None], name='input')
targets_ = tf.placeholder(tf.int32, shape=[None, None], name='targets')
learn_rate_ = tf.placeholder(tf.float32, shape=None, name='learning_rate')
return (inputs_, targets_, learn_rate_)
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_get_inputs(get_inputs)
def get_init_cell(batch_size, rnn_size):
"""Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initial state)"""
# TODO: Implement Function
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
cell = tf.contrib.rnn.MultiRNNCell([lstm])
initial_state = tf.identity(cell.zero_state(batch_size, tf.float32), name="initial_state")  # LSTM states are float32, not int32
return cell, initial_state
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
"""Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input."""
# TODO: Implement Function
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
embed = tf.nn.embedding_lookup(embedding, input_data)
return embed
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
"""Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)"""
# TODO: Implement Function
outputs, fs = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
final_state = tf.identity(fs, name='final_state')
return outputs, final_state
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_build_rnn(build_rnn)
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
"""Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:param embed_dim: Number of embedding dimensions
:return: Tuple (Logits, FinalState)"""
# TODO: Implement Function
embed = get_embed(input_data, vocab_size, embed_dim)
rnn, final_state = build_rnn(cell, embed)
logits = tf.contrib.layers.fully_connected(rnn, vocab_size, activation_fn=None, \
weights_initializer = tf.truncated_normal_initializer(stddev=0.1),\
biases_initializer=tf.zeros_initializer())
return logits, final_state
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
"""Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array"""
# TODO: Implement Function
num_batches = int(len(int_text) / (batch_size * seq_length))
num_words = num_batches * batch_size * seq_length
input_data = np.array(int_text[:num_words])
target_data = np.array(int_text[1:num_words+1])
input_batches = np.split(input_data.reshape(batch_size, -1), num_batches, 1)
target_batches = np.split(target_data.reshape(batch_size, -1), num_batches, 1)
#last target value in the last batch is the first input value of the first batch
#print (batches)
target_batches[-1][-1][-1]=input_batches[0][0][0]
return np.array(list(zip(input_batches, target_batches)))
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_get_batches(get_batches)
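# Illustrative shape check (not part of the graded project): with 20 tokens,
# batch_size=2 and seq_length=3 we expect 3 batches, each holding an
# (input, target) pair of shape (2, 3).
demo_batches = get_batches(list(range(20)), 2, 3)
print(demo_batches.shape)  # (num_batches, 2, batch_size, seq_length) -> (3, 2, 2, 3)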
# Number of Epochs
num_epochs = 20
# Batch Size
batch_size = 100
# RNN Size
rnn_size = 512
# Embedding Dimension Size
embed_dim = 300
# Sequence Length
seq_length = 10
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 10
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
save_dir = './save'
"""DON'T MODIFY ANYTHING IN THIS CELL"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
"""DON'T MODIFY ANYTHING IN THIS CELL"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
def get_tensors(loaded_graph):
"""Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)"""
# TODO: Implement Function
InputTensor = loaded_graph.get_tensor_by_name('input:0')
InitialStateTensor = loaded_graph.get_tensor_by_name('initial_state:0')
FinalStateTensor = loaded_graph.get_tensor_by_name('final_state:0')
ProbsTensor = loaded_graph.get_tensor_by_name('probs:0')
return InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_get_tensors(get_tensors)
def pick_word(probabilities, int_to_vocab):
"""Pick the next word in the generated text
:param probabilities: Probabilities of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word"""
# TODO: Implement Function
return int_to_vocab[np.argmax(probabilities)]
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_pick_word(pick_word)
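# A hedged alternative (sketch, not the graded solution): sampling from the
# probability distribution instead of taking the argmax tends to make the
# generated script less repetitive.
def pick_word_sampled(probabilities, int_to_vocab):
    idx = np.random.choice(len(probabilities), p=probabilities)
    return int_to_vocab[idx]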
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[0, dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TV Script Generation
Step3: Explore the Data
Step6: Implement Preprocessing Functions
Step9: Tokenize Punctuation
Step11: Preprocess all the data and save it
Step13: Check Point
Step15: Build the Neural Network
Step18: Input
Step21: Build RNN Cell and Initialize
Step24: Word Embedding
Step27: Build RNN
Step30: Build the Neural Network
Step33: Batches
Step35: Neural Network Training
Step37: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Implement Generate Functions
Step49: Choose Word
Step51: Generate TV Script
|
7,364
|
<ASSISTANT_TASK:>
Python Code:
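# Assumed setup (not shown in the original cell): the feature DataFrame
# `train` and label vector `y_label` are built upstream, and these imports
# are presumed to have been run earlier in the notebook.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import mean_squared_error, r2_score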
train.columns
train.drop(['type',
'mv',
'blockTime',
'difficulty',
'gasLimit_b',
'gasUsed_b',
'reward',
'size',
'totalFee',
'gasShare',
'gweiPaid',
'gweiPaid_b',
'gweiShare',
'txcnt_second'], axis=1, inplace=True)
train.columns
train.drop(['free_t',
'newContract',
'amount_gwei',
'type_enc',
'dayofweek',
'day'], axis=1, inplace=True)
X = train.values
y = y_label
X_train, X_test, y_train, y_test = train_test_split(X, y)
X_train.shape, y_train.shape
def rf_regressor(X_train, X_test, y_train, y_test):
rf = RandomForestRegressor()
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)
scores = cross_val_score(rf, X_train, y_train, scoring='r2', cv=5)
print('MSE: {}'.format(mean_squared_error(y_test, y_pred)))
print('R2_score: {}'.format(r2_score(y_test, y_pred)))
print('avg_CV_score: {}'.format(np.mean(scores)))
# write predicted values to csv
p = pd.DataFrame({'y_pred': y_pred})
p.to_csv('./../data/label_pred.csv')
return rf
model = rf_regressor(X_train, X_test, y_train, y_test)
def plot_feature_importance(rf, feature_df):
cols = []
for col in feature_df.columns:
cols.append(col)
feat_scores = pd.DataFrame({'Fraction of Samples Affected' : rf.feature_importances_},
index=cols)
feat_scores = feat_scores.sort_values(by='Fraction of Samples Affected')
feat_scores.plot(kind='barh', color='r', figsize=(6,6))
#plt.xlabel('Importance', fontsize=18)
plt.title('Feature Importance', fontsize=18)
plt.tight_layout()
plt.savefig('./../images/feat_import_pruned.png', dpi=300)
plot_feature_importance(model, train)
y_pred = pd.read_csv('./../data/label_pred.csv')
y_pred.drop('Unnamed: 0', axis=1, inplace=True)
y_pred.head()
y_test.shape
y_pred = y_pred.values.ravel()
y_test.shape, y_pred.shape
result = pd.DataFrame({'y_test': y_test, 'y_pred': y_pred})
result.head()
plt.scatter(result['y_test'], result['y_pred'])
plt.xlim(0,100)
plt.ylim(0,100)
sns.set(style="darkgrid", color_codes=True)
sns.jointplot(x="y_test", y="y_pred", data=result)
plt.xlim(0, 100)
plt.tight_layout()
plt.savefig('./../images/jointplot.png', dpi=300)
sns.residplot(result.y_test, result.y_pred , lowess=True, color="g")
def linear_regression(X_train, X_test, y_train, y_test):
lr = LinearRegression()
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
scores = cross_val_score(lr, X_train, y_train, scoring='r2', cv=5)
print('MSE: {}'.format(mean_squared_error(y_test, y_pred)))
print('R2_score: {}'.format(r2_score(y_test, y_pred)))
print('avg_CV_score: {}'.format(np.mean(scores)))
return lr
linear_regression(X_train, X_test, y_train, y_test)
# get summary statistics from statsmodels
model = sm.OLS(y_train, X_train)
result = model.fit()
result.summary()
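# Note: sm.OLS does not add an intercept by default; if one is wanted, a
# common (optional) refinement is:
# result = sm.OLS(y_train, sm.add_constant(X_train)).fit()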
from numpy.linalg import matrix_rank
X_train.shape
matrix_rank(X_train)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: Prune out some more features
Step3: Split data into training and test sets
Step4: Random forest regressor
Step5: Plot predicted values against labels
Step6: It looks like it is overfitting
Step7: All features appear to be significant
|
7,365
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import sys
# Load Yabox (from local)
# Comment this line to use the installed version
sys.path.insert(0, '../')
import yabox as yb
# Import the DE implementations
from yabox.algorithms import DE, PDE
print('Yabox version: ', yb.__version__)
DE(lambda x: sum(x**2) + 1, [(-10, 10)] * 5, maxiters=10000).solve(show_progress=True)
def create_loss(p1, p2, p3):
# Prepare here whatever you need. Load parameters,
# read from file, etc
a = p1 * p2
b = p2 / p3
# Define the function to be optimized as an inner function
# that can make use of the other parameters
def f(x):
return 1 + a*x - b*x**2 + 0.01*x**3 + 0.001 * x**4
return f
f = create_loss(5, 2, 0.1)
f(0)
x = np.arange(-150, 150, 0.1)
plt.plot(x, f(x));
xo, yo = DE(f, [(-150, 150)], maxiters=1000).solve(show_progress=True)
xo, yo
fig, ax = plt.subplots()
ax.plot(x, f(x));
ax.plot(xo[0][0], yo[0], '*')
# Control the iteration process
de = DE(f, [(-150, 150)], maxiters=30)
for step in de.geniterator():
idx = step.best_idx
norm_vector = step.population[idx]
best_params = de.denormalize([norm_vector])
print(step.best_fitness, norm_vector, best_params[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basics
Step2: In many scenarios, the function to optimize may depend on many other components or other fixed parameters. It is very convenient to define a function to create your optimizable function. Here is an example of this, where create_loss is a function that prepares the data and returns another function to be optimized
Step3: You can also control the search process. Use geniterator() to get an iterator at the level of the population (by default, .iterator() returns an iterator at the level of the individual, so it iterates maxiters * popsize times)
|
7,366
|
<ASSISTANT_TASK:>
Python Code:
import spacy
import pandas as pd
%matplotlib inline
from ast import literal_eval
import numpy as np
import re
import json
from nltk.corpus import names
from collections import Counter
from matplotlib import pyplot as plt
plt.rcParams["figure.figsize"] = [16, 6]
plt.style.use('ggplot')
nlp = spacy.load('en')
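# Note: spacy.load('en') and the tag/parse/entity keyword flags used below
# are spaCy 1.x conventions; later versions load named models such as
# 'en_core_web_sm' and disable components via spacy.load(..., disable=[...]).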
with open('../middlemarch.txt') as f:
mm = f.read()
textALength = len(mm)
# Get chapter locations
chapterMatches = re.finditer('PRELUDE|CHAPTER|FINALE', mm)
chapterLocations = [match.start() for match in chapterMatches]
chapterLocations.append(textALength) # Append the end of the text so the last chunk is bounded.
len(chapterLocations)
def getChapters(text):
chapters = []
for i, loc in enumerate(chapterLocations):
if i != len(chapterLocations)-1:
chapter = mm[loc:chapterLocations[i+1]]
chapters.append(chapter)
return chapters
chapters = getChapters(mm)
chapterLengths = [len(nlp(chapter, tag=False, parse=False, entity=False)) for chapter in chapters]
chapterLengthsSeries = pd.Series(chapterLengths)
# chapterLengthsSeries.plot(kind='bar', title='Chapter Lengths')
with open('../txt/anthologies.json') as f:
rawData = f.read()
df = pd.read_json(rawData)
sum([len(item) for item in df['Locations in A'].values])
def diachronicAnalysis(df, decades=(1950, 2020), bins=chapterLocations, useWordcounts=True):
"""Turning on useWordcounts weights each bin by word count;
turning it off uses raw numbers of quotations."""
decades = np.arange(decades[0], decades[1], 10)
# Make a dictionary of decades.
# Values are a list of locations.
decadeDict = {}
for i, row in df.iterrows():
decade = row['Decade']
locationsAndWordcounts = row['Locations in A with Wordcounts']
if decade not in decadeDict:
decadeDict[decade] = locationsAndWordcounts
else:
decadeDict[decade] += locationsAndWordcounts
# Grab the beginnings of quotes.
decadeStartsWeights = {decade: [(item[0][0], item[1])
for item in loc]
for decade, loc in decadeDict.items()}
if useWordcounts:
decadesBinned = {decade:
np.histogram([loc[0] for loc in locations],
bins=bins,
weights=[loc[1] for loc in locations],
range=(0, textALength))[0]
for decade, locations in decadeStartsWeights.items()
if decade in decades}
else:
decadesBinned = {decade:
np.histogram([loc[0] for loc in locations],
bins=bins,
range=(0, textALength))[0]
for decade, locations in decadeStartsWeights.items()
if decade in decades}
decadesDF = pd.DataFrame(decadesBinned).T
#Normalize
decadesDF = decadesDF.div(decadesDF.max(axis=1), axis=0)
return decadesDF
def countWords(locRange):
"""Counts words in Middlemarch, given a character range."""
doc = nlp(mm[locRange[0]:locRange[1]], tag=False, parse=False, entity=False)
return len(doc)
def totalWords(locRangeSet):
"""Counts total words in a list of location ranges."""
return sum([countWords(locRange) for locRange in locRangeSet])
def countsPerSet(locRangeSet):
"""Returns an augmented location range set that includes word counts."""
return [(locRange, countWords(locRange))
for locRange in locRangeSet]
def extractWordcounts(locsAndWordcounts):
"""Takes pairs of location ranges and wordcounts,
and returns just the wordcounts."""
return [item[1] for item in locsAndWordcounts
if len(locsAndWordcounts) > 0]
def synchronicAnalysis(df, bins=chapterLocations, useWordcounts=True):
locs = df['Locations in A'].values
locCounts = [(loc, countWords(loc)) for locSet in locs
for loc in locSet]
starts = [loc[0][0] for loc in locCounts]
counts = [loc[1] for loc in locCounts]
if useWordcounts:
binned = np.histogram(starts, bins=bins,
weights=counts, range=(0, textALength))
else:
binned = np.histogram(starts, bins=bins,
range=(0, textALength))
binnedDF = pd.Series(binned[0])
return binnedDF
def plotDiachronicAnalysis(df):
ylabels = [str(int(decade)) for decade in df.index] + ['2020']
plt.pcolor(df, cmap='gnuplot')
plt.yticks(np.arange(len(df.index)+1), ylabels)
plt.gca().invert_yaxis()
plt.ylabel('Decade')
plt.xlabel('Chapter')
plt.gca().set_xlim((0, len(df.T)))
plt.colorbar(ticks=[])
plt.show()
def plotSynchronicAnalysis(s, useWordcounts=True):
ax = s.plot(kind='bar')
ax.set_xlabel('Chapter')
if useWordcounts:
ax.set_ylabel('Number of Words Quoted')
else:
ax.set_ylabel('Number of Quotations')
df['Quoted Words'] = df['Locations in A'].apply(totalWords)
df['Locations in A with Wordcounts'] = df['Locations in A'].apply(countsPerSet)
df['Wordcounts'] = df['Locations in A with Wordcounts'].apply(extractWordcounts)
df['Wordcounts'].values
wordcounts = []
for countSet in df['Wordcounts'].values:
for count in countSet:
wordcounts.append(count)
pd.Series(wordcounts).hist()
plotSynchronicAnalysis(synchronicAnalysis(df))
plotSynchronicAnalysis(synchronicAnalysis(df, useWordcounts=False), useWordcounts=False)
# Adjusted for the number of words in each chapter
ax = (synchronicAnalysis(df) / chapterLengthsSeries).plot(kind='bar')
ax.set_xlabel('Chapter')
ax.set_ylabel('Words Quoted, Normalized')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: Analysis of Anthologies
Step6: Quotation Length Statistics
Step7: Number of Quotes (and words Quoted) by Chapter
|
7,367
|
<ASSISTANT_TASK:>
Python Code:
# Import relevant modules
%matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
from NPTFit import nptfit # module for performing scan
from NPTFit import create_mask as cm # module for creating the mask
from NPTFit import dnds_analysis # module for analysing the output
from NPTFit import psf_correction as pc # module for determining the PSF correction
n = nptfit.NPTF(tag='GCE_Example')
fermi_data = np.load('fermi_data/fermidata_counts.npy')
fermi_exposure = np.load('fermi_data/fermidata_exposure.npy')
n.load_data(fermi_data, fermi_exposure)
pscmask=np.array(np.load('fermi_data/fermidata_pscmask.npy'), dtype=bool)
analysis_mask = cm.make_mask_total(band_mask = True, band_mask_range = 2,
mask_ring = True, inner = 0, outer = 30,
custom_mask = pscmask)
n.load_mask(analysis_mask)
dif = np.load('fermi_data/template_dif.npy')
iso = np.load('fermi_data/template_iso.npy')
bub = np.load('fermi_data/template_bub.npy')
gce = np.load('fermi_data/template_gce.npy')
dsk = np.load('fermi_data/template_dsk.npy')
n.add_template(dif, 'dif')
n.add_template(iso, 'iso')
n.add_template(bub, 'bub')
n.add_template(gce, 'gce')
n.add_template(dsk, 'dsk')
n.add_poiss_model('dif', '$A_\mathrm{dif}$', fixed=True, fixed_norm=14.67)
n.add_poiss_model('iso', '$A_\mathrm{iso}$', [0,2], False)
n.add_poiss_model('gce', '$A_\mathrm{gce}$', [0,2], False)
n.add_poiss_model('bub', '$A_\mathrm{bub}$', [0,2], False)
n.add_non_poiss_model('gce',
['$A_\mathrm{gce}^\mathrm{ps}$','$n_1^\mathrm{gce}$','$n_2^\mathrm{gce}$','$S_b^{(1), \mathrm{gce}}$'],
[[-6,1],[2.05,30],[-2,1.95],[0.05,40]],
[True,False,False,False])
n.add_non_poiss_model('dsk',
['$A_\mathrm{dsk}^\mathrm{ps}$','$n_1^\mathrm{dsk}$','$n_2^\mathrm{dsk}$','$S_b^{(1), \mathrm{dsk}}$'],
[[-6,1],[2.05,30],[-2,1.95],[0.05,40]],
[True,False,False,False])
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.1812)
f_ary, df_rho_div_f_ary = pc_inst.f_ary, pc_inst.df_rho_div_f_ary
n.configure_for_scan(f_ary, df_rho_div_f_ary, nexp=1)
n.perform_scan(nlive=100)
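# Note: nlive=100 keeps this demo fast; published analyses typically use
# many more live points for a more reliable posterior.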
from IPython.display import Image
Image(url = "https://imgs.xkcd.com/comics/compiling.png")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Step 2
Step3: This time we add a non-Poissonian template correlated with the Galactic Center Excess and also one spatially distributed as a thin disk. The latter is designed to account for the unresolved point sources attributed to the disk of the Milky Way (known sources in the 3FGL are masked).
Step4: Step 3
Step5: Step 4
Step6: This can take up to an hour to run. The output of this run will be analyzed in detail in the next example.
|
7,368
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# Use the default seaborn settings
import seaborn as sns; sns.set()
np.random.seed(1)
X = np.dot(np.random.random(size=(2, 2)), np.random.normal(size=(2, 200))).T
plt.plot(X[:, 0], X[:, 1], 'o')
plt.axis('equal');
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(X)
print(pca.explained_variance_)
print(pca.components_)
plt.plot(X[:, 0], X[:, 1], 'o', alpha=0.5)
for length, vector in zip(pca.explained_variance_, pca.components_):
v = vector * 3 * np.sqrt(length)
plt.plot([0, v[0]], [0, v[1]], '-k', lw=3)
plt.axis('equal');
clf = PCA(0.95) # keep 95% of the variance
X_trans = clf.fit_transform(X)
print(X.shape)
print(X_trans.shape)
X_new = clf.inverse_transform(X_trans)
plt.plot(X[:, 0], X[:, 1], 'o', alpha=0.2)
plt.plot(X_new[:, 0], X_new[:, 1], 'ob', alpha=0.8)
plt.axis('equal');
from sklearn.datasets import load_digits
digits = load_digits()
X = digits.data
y = digits.target
pca = PCA(2) # project the 64 dimensions down to 2
Xproj = pca.fit_transform(X)
print(X.shape)
print(Xproj.shape)
plt.scatter(Xproj[:, 0], Xproj[:, 1], c=y, edgecolor='none', alpha=0.5,
cmap=plt.cm.get_cmap('nipy_spectral', 10))
plt.colorbar();
from fig_code.figures import plot_image_components
sns.set_style('white')
plot_image_components(digits.data[0])
from fig_code.figures import plot_pca_interactive
plot_pca_interactive(digits.data)
sns.set()
pca = PCA().fit(X)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance');
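# A quick numeric companion to the plot above (sketch): the number of
# components needed to retain 90% of the variance.
n_90 = np.argmax(np.cumsum(pca.explained_variance_ratio_) >= 0.90) + 1
print(n_90)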
fig, axes = plt.subplots(8, 8, figsize=(8, 8))
fig.subplots_adjust(hspace=0.1, wspace=0.1)
for i, ax in enumerate(axes.flat):
pca = PCA(i + 1).fit(X)
im = pca.inverse_transform(pca.transform(X[20:21]))
ax.imshow(im.reshape((8, 8)), cmap='binary')
ax.text(0.95, 0.05, 'n = {0}'.format(i + 1), ha='right',
transform=ax.transAxes, color='green')
ax.set_xticks([])
ax.set_yticks([])
from IPython.html.widgets import interact
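# Note: IPython.html.widgets is the legacy location; on modern Jupyter the
# equivalent import is `from ipywidgets import interact`.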
def plot_digits(n_components):
fig = plt.figure(figsize=(8, 8))
plt.subplot(1, 1, 1, frameon=False, xticks=[], yticks=[])
nside = 10
pca = PCA(n_components).fit(X)
Xproj = pca.inverse_transform(pca.transform(X[:nside ** 2]))
Xproj = np.reshape(Xproj, (nside, nside, 8, 8))
total_var = pca.explained_variance_ratio_.sum()
im = np.vstack([np.hstack([Xproj[i, j] for j in range(nside)])
for i in range(nside)])
plt.imshow(im)
plt.grid(False)
plt.title("n = {0}, variance = {1:.2f}".format(n_components, total_var),
size=18)
plt.clim(0, 16)
interact(plot_digits, n_components=[1, 64]);  # plot_digits takes only n_components
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction to principal component analysis
Step2: We can see a clear trend and direction in this dataset. What principal component analysis (PCA) does is find the most fundamental axes in the data and explain how those axes shape its distribution:
Step3: Let's draw these vectors on top of the data to get an intuitive sense of what the numbers mean:
Step4: Notice that one vector is longer than the other. In a sense this tells us that the direction of the longer vector is more "important" than the other direction; the variance quantifies this "importance".
Step5: After discarding 5% of the variance, our data has been compressed by a full half! Let's see what the compressed data looks like:
Step6: In the figure, the light points are the original data and the dark points are the projected, compressed points. After discarding 5% of the variance, the most important features of the dataset are preserved, and the total amount of data is halved!
Step7: This figure shows us the differences and relationships among the digits. More importantly, we have found an effective treatment of the 64-dimensional data that reveals these relationships without knowing the labels.
Step8: But the pixel-based representation is not the only choice. We can also use other basis functions, for example
Step9: We can see that with a combination of only 6 PCA components we already get a rather good reconstruction of the input data!
Step10: The figure shows that a two-dimensional projection loses a lot of information; we need to keep roughly 20 components to retain 90% of the variance. Studying such plots for high-dimensional data helps you understand the redundancy in its representation.
Step11: Now let's use IPython's interact feature to look at some image reconstructions:
|
7,369
|
<ASSISTANT_TASK:>
Python Code:
s = 'Fluent'
L = [10, 20, 30, 40, 50]
print(list(s)) # list constructor iterates over its argument
a, b, *middle, c = L # tuple unpacking iterates over right side
print((a, b, c))
for i in L:
print(i, end=' ')
len(s), len(L)
s.__len__(), L.__len__()
a = 2
b = 3
a * b, a.__mul__(b)
L = [1, 2, 3]
L.append(L)
L
x = 2**.5
x
format(x, '.3f')
from datetime import datetime
agora = datetime.now()
print(agora)
print(format(agora, '%H:%M'))
'{1:%H}... {0:.3f}!'.format(x, agora)
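# A minimal sketch (not from the original notebook) tying these protocols
# together: implementing __len__ and __format__ makes a user-defined class
# work with len() and format() exactly like the built-ins above.
class Vector2d:
    def __init__(self, x, y):
        self.x, self.y = float(x), float(y)
    def __len__(self):
        return 2
    def __format__(self, fmt_spec=''):
        return '({}, {})'.format(format(self.x, fmt_spec), format(self.y, fmt_spec))

v = Vector2d(3, 4)
len(v), format(v, '.2f')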
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sizing with len()
Step2: Arithmetic
Step3: A simple but full-featured Pythonic class
|
7,370
|
<ASSISTANT_TASK:>
Python Code:
# Load pickled data
import pickle
# TODO: Fill this in based on where you saved the training and testing data
training_file = ?
testing_file = ?
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_test, y_test = test['features'], test['labels']
### Replace each question mark with the appropriate value.
# TODO: Number of training examples
n_train = ?
# TODO: Number of testing examples.
n_test = ?
# TODO: What's the shape of a traffic sign image?
image_shape = ?
# TODO: How many unique classes/labels there are in the dataset.
n_classes = ?
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
### Data exploration visualization goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
# Visualizations will be shown in the notebook.
%matplotlib inline
### Preprocess the data here.
### Feel free to use as many code cells as needed.
### Generate additional data (OPTIONAL!)
### and split the data into training/validation/testing sets here.
### Feel free to use as many code cells as needed.
### Define your architecture here.
### Feel free to use as many code cells as needed.
### Train your model here.
### Feel free to use as many code cells as needed.
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
### Run the predictions here.
### Feel free to use as many code cells as needed.
### Visualize the softmax probabilities here.
### Feel free to use as many code cells as needed.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include
Step3: Step 2
Step4: Question 1
Step5: Question 2
Step6: Question 3
Step7: Question 4
Step8: Question 6
Step9: Question 7
|
7,371
|
<ASSISTANT_TASK:>
Python Code:
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
import io
import os.path
import re
import tarfile
import smart_open
def extract_documents(url='https://cs.nyu.edu/~roweis/data/nips12raw_str602.tgz'):
fname = url.split('/')[-1]
# Download the file to local storage first.
# We can't read it on the fly because of
# https://github.com/RaRe-Technologies/smart_open/issues/331
if not os.path.isfile(fname):
with smart_open.open(url, "rb") as fin:
with smart_open.open(fname, 'wb') as fout:
while True:
buf = fin.read(io.DEFAULT_BUFFER_SIZE)
if not buf:
break
fout.write(buf)
with tarfile.open(fname, mode='r:gz') as tar:
# Ignore directory entries, as well as files like README, etc.
files = [
m for m in tar.getmembers()
if m.isfile() and re.search(r'nipstxt/nips\d+/\d+\.txt', m.name)
]
for member in sorted(files, key=lambda x: x.name):
member_bytes = tar.extractfile(member).read()
yield member_bytes.decode('utf-8', errors='replace')
docs = list(extract_documents())
print(len(docs))
print(docs[0][:500])
# Tokenize the documents.
from nltk.tokenize import RegexpTokenizer
# Split the documents into tokens.
tokenizer = RegexpTokenizer(r'\w+')
for idx in range(len(docs)):
docs[idx] = docs[idx].lower() # Convert to lowercase.
docs[idx] = tokenizer.tokenize(docs[idx]) # Split into words.
# Remove numbers, but not words that contain numbers.
docs = [[token for token in doc if not token.isnumeric()] for doc in docs]
# Remove words that are only one character.
docs = [[token for token in doc if len(token) > 1] for doc in docs]
# Lemmatize the documents.
from nltk.stem.wordnet import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
docs = [[lemmatizer.lemmatize(token) for token in doc] for doc in docs]
# Compute bigrams.
from gensim.models import Phrases
# Add bigrams and trigrams to docs (only ones that appear 20 times or more).
bigram = Phrases(docs, min_count=20)
for idx in range(len(docs)):
for token in bigram[docs[idx]]:
if '_' in token:
# Token is a bigram, add to document.
docs[idx].append(token)
# Remove rare and common tokens.
from gensim.corpora import Dictionary
# Create a dictionary representation of the documents.
dictionary = Dictionary(docs)
# Filter out words that occur less than 20 documents, or more than 50% of the documents.
dictionary.filter_extremes(no_below=20, no_above=0.5)
# Bag-of-words representation of the documents.
corpus = [dictionary.doc2bow(doc) for doc in docs]
print('Number of unique tokens: %d' % len(dictionary))
print('Number of documents: %d' % len(corpus))
# Train LDA model.
from gensim.models import LdaModel
# Set training parameters.
num_topics = 10
chunksize = 2000
passes = 20
iterations = 400
eval_every = None # Don't evaluate model perplexity, takes too much time.
# Make a index to word dictionary.
temp = dictionary[0] # This is only to "load" the dictionary.
id2word = dictionary.id2token
model = LdaModel(
corpus=corpus,
id2word=id2word,
chunksize=chunksize,
alpha='auto',
eta='auto',
iterations=iterations,
num_topics=num_topics,
passes=passes,
eval_every=eval_every
)
top_topics = model.top_topics(corpus)  # num_words=20 by default
# Average topic coherence is the sum of topic coherences of all topics, divided by the number of topics.
avg_topic_coherence = sum([t[1] for t in top_topics]) / num_topics
print('Average topic coherence: %.4f.' % avg_topic_coherence)
from pprint import pprint
pprint(top_topics)
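# Optional sketch: inspect the topic mixture of a single document with the
# trained model (uses only the objects already defined above).
print(model.get_document_topics(corpus[0]))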
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The purpose of this tutorial is to demonstrate how to train and tune an LDA model.
Step2: So we have a list of 1740 documents, where each document is a Unicode string.
Step3: Pre-process and vectorize the documents
Step4: We use the WordNet lemmatizer from NLTK. A lemmatizer is preferred over a
Step5: We find bigrams in the documents. Bigrams are sets of two adjacent words.
Step6: We remove rare words and common words based on their document frequency.
Step7: Finally, we transform the documents to a vectorized form. We simply compute
Step8: Let's see how many tokens and documents we have to train on.
Step9: Training
Step10: We can compute the topic coherence of each topic. Below we display the
|
7,372
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import KFold, cross_val_score
import numpy as np
dataset = pd.read_csv('heart.csv')
dataset.head()
dataset.hist()
dataset.info()
X = dataset.iloc[:,0:13] # all 13 feature columns (0-12); column 13 is the target
Y = dataset.iloc[:,13]
X_train, X_test, y_train, y_test = train_test_split(X, Y, train_size=0.8, test_size= 0.2, random_state=0)
model = DecisionTreeClassifier(criterion='gini', max_depth=4, min_samples_split= 20)
model.fit(X_train, y_train)
predicao = model.predict(X_test)
accuracy = model.score(X_test, y_test)*100
print('Accuracy: %s%%' % accuracy)
maxAccuracy = 0
valueI, valueJ = '', ''
for i in range(2, 20):
for j in range(2, 20):
model = DecisionTreeClassifier(criterion='gini', max_depth=i, min_samples_split= j)
model.fit(X_train, y_train)
predicao = model.predict(X_test)
accuracy = model.score(X_test, y_test)*100
if accuracy > maxAccuracy:
maxAccuracy = accuracy
valueI, valueJ = i,j
print('I: {0}, J:{1} Accuracy: {2}%'.format(valueI, valueJ, maxAccuracy))
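# A hedged alternative to the manual sweep above: scikit-learn's GridSearchCV
# runs the same grid with cross-validation instead of a single test split.
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(DecisionTreeClassifier(criterion='gini'),
                    {'max_depth': range(2, 20), 'min_samples_split': range(2, 20)},
                    cv=5)
grid.fit(X_train, y_train)
print(grid.best_params_, grid.best_score_)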
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Split the dataset into features and classes
Step2: Split the dataset into training and test sets
Step3: Define the model
Step4: Train the model
Step5: Predict using X_test
Step6: Compute the accuracy
|
7,373
|
<ASSISTANT_TASK:>
Python Code:
# The action to take upon a certain event is usually specified at the "source"
b = Button()
b.mouse_down.connect(some_callback)
...
def some_callback(event):
...
from flexx import react
@react.connect('name')
def greet(n):
print('hello %s!' % n)
@react.connect('first_name', 'last_name')
def name(first, last):
return '%s %s' % (first, last)
@react.input
def first_name(n='John'):
assert isinstance(n, str)
return n.capitalize()
@react.input
def last_name(n='Doe'):
assert isinstance(n, str)
return n.capitalize()
# For the sake of the story, we defined the signals out of order, so we need to connect them
name.connect(); greet.connect()
first_name() # get signal value
first_name('jane') # set signal value (for input signals)
class Item(react.HasSignals):
@react.input
def name(n):
return str(n)
class Collection(react.HasSignals):
@react.input
def items(items):
assert all([isinstance(i, Item) for i in items])
return tuple(list(items))
@react.input
def ref(i):
assert isinstance(i, Item)
return i
itemA, itemB, itemC, itemD = Item(name='A'), Item(name='B'), Item(name='C'), Item(name='D')
C1 = Collection(items=(itemA, itemB))
C2 = Collection(items=(itemC, itemD))
itemB.name()
C1.items()
class Collection2(Collection):
@react.connect('ref.name')
def show_ref_name(name):
print('The ref is %s' % name)
@react.connect('items.*.name')
def show_index(*names):
print('index: '+ ', '.join(names))
itemA, itemB, itemC, itemD = Item(name='A'), Item(name='B'), Item(name='C'), Item(name='D')
C1 = Collection2(items=(itemA, itemB))
C2 = Collection2(items=(itemC, ))
C1.ref(itemA)
C1.ref(itemD)
itemD.name('D-renamed')
C2.items([itemC, itemD])
itemC.name('C-renamed')
@react.input
def foo(v):
return str(v)
@react.lazy('foo')
def bar(v):
print('update bar')
return v * 10 # imagine that this is an expensive operation
foo('hello') # Does not trigger bar
foo('heya')
foo('hi')
bar() # this is where bar gets updated
bar() # foo has not changed; cached value is returned
@react.input
def some_value(v=0):
return float(v)
some_value(0) # init
@react.connect('some_value')
def show_diff(s):
print('diff: ', s - some_value.last_value) # note: we might rename this to previous_value
some_value(10)
some_value(12)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reactive Programming (in flexx)
Step2: Signals yield new values, thereby transforming or combining the upstream signals. Also, you can connect to as many signals as necessary
Step3: Input signals can be called with an argument to set their value
Step4: Observations
Step5: Dynamism - connect to signals of signals
Step6: Lazy evaluation
Step7: Signal history
|
7,374
|
<ASSISTANT_TASK:>
Python Code:
# Start the Spark Session
# This uses local mode for simplicity
# the use of findspark is optional
# install pyspark if needed
# ! pip install pyspark
# import findspark
# findspark.init("/home/luca/Spark/spark-3.3.0-bin-hadoop3")
from pyspark.sql import SparkSession
spark = (SparkSession.builder
.appName("PySpark histograms")
.master("local[*]")
.getOrCreate()
)
num_events = 100
scale = 100
seed = 4242
df = spark.sql(f"select random({seed}) * {scale} as random_value from range({num_events})")
df.show(5)
# import the computeHistogram function
# see implementation details at:
# https://github.com/LucaCanali/Miscellaneous/blob/master/Spark_Notes/Spark_Histograms/python/sparkhistogram/histogram.py
# requires the package sparkhistogram
! pip install sparkhistogram
from sparkhistogram import computeHistogram
# Compute the histogram using the computeHistogram function
histogram = computeHistogram(df, "random_value", -20, 90, 11)
# Alternative syntax: compute the histogram using transform on the DataFrame
# requires Spark 3.3.0 or higher
# histogram = df.transform(computeHistogram, "random_value", -20, 90, 11)
# this triggers the computation as show() is an action
histogram.show()
# Fetch the histogram data into a Pandas DataFrame for visualization
# At this stage data is reduced to a small number of rows (one row per bin)
# so it can be easily handled by the local machine/driver
# toPandas() is an action and triggers the computation
hist_pandasDF = histogram.toPandas()
hist_pandasDF
# Optionally normalize the event count into a frequency
# dividing by the total number of events
hist_pandasDF["frequency"] = hist_pandasDF["count"] / sum(hist_pandasDF["count"])
hist_pandasDF
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
plt.rcParams.update({'font.size': 20, 'figure.figsize': [14,10]})
f, ax = plt.subplots()
# histogram data
x = hist_pandasDF["value"]
y = hist_pandasDF["count"]
# bar plot
ax.bar(x, y, width = 3.0, color='red')
ax.set_xlabel("Bucket values")
ax.set_ylabel("Event count")
ax.set_title("Distribution of event counts")
# Label for the resonances spectrum peaks
txt_opts = {'horizontalalignment': 'center',
'verticalalignment': 'center',
'transform': ax.transAxes}
plt.show()
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
plt.rcParams.update({'font.size': 20, 'figure.figsize': [14,10]})
f, ax = plt.subplots()
# histogram data
x = hist_pandasDF["value"]
y = hist_pandasDF["frequency"]
# bar plot
ax.bar(x, y, width = 3.0, color='blue')
ax.set_xlabel("Bucket values")
ax.set_ylabel("Event frequency")
ax.set_title("Distribution of event frequencies")
# Label for the resonances spectrum peaks
txt_opts = {'horizontalalignment': 'center',
'verticalalignment': 'center',
'transform': ax.transAxes}
plt.show()
spark.stop()
def computeHistogram(df: "DataFrame", value_col: str, min: float, max: float, bins: int) -> "DataFrame":
"""This is a DataFrame function to compute the count/frequency histogram of a column
Parameters
----------
df: the dataframe with the data to compute
value_col: column name on which to compute the histogram
min: minimum value in the histogram
max: maximum value in the histogram
bins: number of histogram buckets to compute
Output DataFrame
----------------
bucket: the bucket number, range from 1 to bins (included)
value: midpoint value of the given bucket
count: number of values in the bucket"""
step = (max - min) / bins
# this will be used to fill in for missing buckets, i.e. buckets with no corresponding values
df_buckets = spark.sql(f"select id+1 as bucket from range({bins})")
histdf = (df
.selectExpr(f"width_bucket({value_col}, {min}, {max}, {bins}) as bucket")
.groupBy("bucket")
.count()
.join(df_buckets, "bucket", "right_outer") # add missing buckets and remove buckets out of range
.selectExpr("bucket", f"{min} + (bucket - 1/2) * {step} as value", # use center value of the buckets
"nvl(count, 0) as count") # buckets with no values will have a count of 0
.orderBy("bucket")
)
return histdf
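# Note: width_bucket assigns values below min to bucket 0 and values at or
# above max to bucket bins+1; the right_outer join above both fills in empty
# in-range buckets and drops those two out-of-range buckets.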
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate a DataFrame with toy data for demo purposes
Step2: Compute the histogram
Step3: Histogram plotting
Step5: Note added
|
7,375
|
<ASSISTANT_TASK:>
Python Code:
import pandas
import numpy
import toyplot
import toyplot.pdf
import toyplot.png
import toyplot.svg
print('Pandas version: ', pandas.__version__)
print('Numpy version: ', numpy.__version__)
print('Toyplot version: ', toyplot.__version__)
column_names = ['MPG',
'Cylinders',
'Displacement',
'Horsepower',
'Weight',
'Acceleration',
'Model Year',
'Origin',
'Car Name']
data = pandas.read_table('auto-mpg.data',
delim_whitespace=True,
names=column_names,
index_col=False)
data['Make'] = data['Car Name'].str.split().str.get(0)
# .loc replaces the .ix indexer, which was removed from pandas
data.loc[data['Make'] == 'chevroelt', 'Make'] = 'chevrolet'
data.loc[data['Make'] == 'chevy', 'Make'] = 'chevrolet'
data.loc[data['Make'] == 'maxda', 'Make'] = 'mazda'
data.loc[data['Make'] == 'mercedes-benz', 'Make'] = 'mercedes'
data.loc[data['Make'] == 'vokswagen', 'Make'] = 'volkswagen'
data.loc[data['Make'] == 'vw', 'Make'] = 'volkswagen'
average_mpg_per_make = data.pivot_table(columns='Make',
values='MPG',
aggfunc='mean')
len(average_mpg_per_make.index)
count_mpg_per_make = data.pivot_table(columns='Make',
values='MPG',
aggfunc='count')
filtered_mpg = average_mpg_per_make[count_mpg_per_make >= 10]
filtered_mpg
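# Equivalent sketch with groupby, for readers on modern pandas where
# pivot_table(columns=...) no longer returns a Series:
make_stats = data.groupby('Make')['MPG'].agg(['mean', 'count'])
filtered_mpg_alt = make_stats.loc[make_stats['count'] >= 10, 'mean']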
canvas = toyplot.Canvas('4in', '2.6in')
axes = canvas.cartesian(bounds=(41,-2,2,-58),
ylabel = 'Average MPG')
axes.plot(filtered_mpg)
# Label the x axis on the make. This is a bit harder than it should be.
axes.x.ticks.locator = \
toyplot.locator.Explicit(labels=filtered_mpg.index)
axes.x.ticks.labels.angle = 45
# It's usually best to make the y-axis 0-based.
axes.y.domain.min = 0
toyplot.pdf.render(canvas, 'XY_Trend_Bad.pdf')
toyplot.svg.render(canvas, 'XY_Trend_Bad.svg')
toyplot.png.render(canvas, 'XY_Trend_Bad.png', scale=5)
canvas = toyplot.Canvas('4in', '2.6in')
axes = canvas.cartesian(bounds=(41,-2,2,-58),
ylabel = 'Average MPG')
axes.bars(filtered_mpg)
# Label the x axis on the make. This is a bit harder than it should be.
axes.x.ticks.locator = \
toyplot.locator.Explicit(labels=filtered_mpg.index)
axes.x.ticks.labels.angle = 45
# It's usually best to make the y-axis 0-based.
axes.y.domain.min = 0
toyplot.pdf.render(canvas, 'Bar.pdf')
toyplot.svg.render(canvas, 'Bar.svg')
toyplot.png.render(canvas, 'Bar.png', scale=5)
sorted_mpg = filtered_mpg.sort_values(ascending=False)
canvas = toyplot.Canvas('4in', '2.6in')
axes = canvas.cartesian(bounds=(41,-2,2,-58),
ylabel = 'Average MPG')
axes.bars(sorted_mpg)
# Label the x axis on the make. This is a bit harder than it should be.
axes.x.ticks.locator = \
toyplot.locator.Explicit(labels=sorted_mpg.index)
axes.x.ticks.labels.angle = 45
# It's usually best to make the y-axis 0-based.
axes.y.domain.min = 0
toyplot.pdf.render(canvas, 'Bar_Sorted.pdf')
toyplot.svg.render(canvas, 'Bar_Sorted.svg')
toyplot.png.render(canvas, 'Bar_Sorted.png', scale=5)
sorted_mpg = filtered_mpg.sort_values(ascending=True)
canvas = toyplot.Canvas('4in', '2.6in')
axes = canvas.cartesian(bounds=(70,-2,2,-44),
xlabel = 'Average MPG')
axes.bars(sorted_mpg,
along='y')
# Label the y axis on the make. This is a bit harder than it should be.
axes.y.ticks.locator = \
toyplot.locator.Explicit(labels=sorted_mpg.index)
axes.y.ticks.labels.angle = -90
# It's usually best to make the y-axis 0-based.
axes.x.domain.min = 0
toyplot.pdf.render(canvas, 'Bar_Rotated.pdf')
toyplot.svg.render(canvas, 'Bar_Rotated.svg')
toyplot.png.render(canvas, 'Bar_Rotated.png', scale=5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load in the "auto" dataset. This is a fun collection of data on cars manufactured between 1970 and 1982. The source for this data can be found at https
Step2: For this analysis I am going to group data by the car maker. The make is not directly stored in the data, but all the names start with the make, so extract the first word in that column.
Step3: The data has some inconsistencies with the make strings (misspellings or alternate spellings). Do some simple fixes.
Step4: In this plot we are going to show the average miles per gallon (MPG) rating for each car maker, and to be super cool we are going to order by average MPG. We can use the pivot_table feature of pandas to get this information from the data. (Excel and other spreadsheets have similar functionality.)
Step5: There are many different makers represented in this data set, but several have only a few cars and perhaps are therefore not a signficant sample. Filter out the car makers that have fewer than 10 entries in the data. (Mostly I'm doing this to make these examples fit better even though it works OK with all the data, too.)
Step6: Now use toyplot to plot 1D histograms for each of these makes.
Step7: A better approach for this data is to use a bar chart.
Step8: That is good, but the ordering is arbitrary (alphebetical). It would be even better if the bars were sorted by size.
Step9: Bar charts also afford the ability to change the orientation, which can help with layout, labels, and space utilization.
|
7,376
|
<ASSISTANT_TASK:>
Python Code:
import os
# The Vertex AI Workbench Notebook product has specific requirements
IS_WORKBENCH_NOTEBOOK = os.getenv("DL_ANACONDA_HOME")
IS_USER_MANAGED_WORKBENCH_NOTEBOOK = os.path.exists(
"/opt/deeplearning/metadata/env_version"
)
# Vertex AI Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_WORKBENCH_NOTEBOOK:
USER_FLAG = "--user"
! pip3 install --upgrade google-cloud-aiplatform[tensorboard] $USER_FLAG -q
! pip3 install --upgrade google-cloud-pipeline-components $USER_FLAG -q
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "[your-region]" # @param {type: "string"}
if REGION == "[your-region]":
REGION = "us-central1"
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Vertex AI Workbench, then don't execute this code
IS_COLAB = False
if not os.path.exists("/opt/deeplearning/metadata/env_version") and not os.getenv(
"DL_ANACONDA_HOME"
):
if "google.colab" in sys.modules:
IS_COLAB = True
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"}
BUCKET_URI = f"gs://{BUCKET_NAME}"
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "[your-bucket-name]":
BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP
BUCKET_URI = "gs://" + BUCKET_NAME
! gsutil mb -l $REGION $BUCKET_URI
! gsutil ls -al $BUCKET_URI
SERVICE_ACCOUNT = "[your-service-account]" # @param {type:"string"}
if (
SERVICE_ACCOUNT == ""
or SERVICE_ACCOUNT is None
or SERVICE_ACCOUNT == "[your-service-account]"
):
# Get your service account from gcloud
if not IS_COLAB:
shell_output = !gcloud auth list 2>/dev/null
SERVICE_ACCOUNT = shell_output[2].replace("*", "").strip()
if IS_COLAB:
shell_output = ! gcloud projects describe $PROJECT_ID
project_number = shell_output[-1].split(":")[1].strip().replace("'", "")
SERVICE_ACCOUNT = f"{project_number}-compute@developer.gserviceaccount.com"
print("Service Account:", SERVICE_ACCOUNT)
! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_URI
! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_URI
import google.cloud.aiplatform as aip
import google.cloud.aiplatform_v1beta1 as aip_beta
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_metadata_client():
client = aip_beta.MetadataServiceClient(client_options=client_options)
return client
clients = {}
clients["metadata"] = create_metadata_client()
for client in clients.items():
print(client)
metadata_store = clients["metadata"].create_metadata_store(
parent=PARENT, metadata_store_id="my-metadata-store"
)
metadata_store_id = str(metadata_store.result())[7:-2]
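# The string slice above strips the `name: "..."` wrapper from the printed
# proto; a cleaner (hypothetical) alternative is metadata_store.result().name.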
print(metadata_store_id)
schemas = clients["metadata"].list_metadata_schemas(parent=metadata_store_id)
for schema in schemas:
print(schema)
from google.cloud.aiplatform_v1beta1.types import Artifact
artifact_item = Artifact(
display_name="my_example_artifact",
uri="my_url",
labels={"my_label": "value"},
schema_title="system.Artifact",
metadata={"param": "value"},
)
artifact = clients["metadata"].create_artifact(
parent=metadata_store_id,
artifact=artifact_item,
artifact_id="myartifactid",
)
print(artifact)
artifacts = clients["metadata"].list_artifacts(parent=metadata_store_id)
for _artifact in artifacts:
print(_artifact)
from google.cloud.aiplatform_v1beta1.types import Execution
execution = clients["metadata"].create_execution(
parent=metadata_store_id,
execution=Execution(
display_name="my_execution",
schema_title="system.CustomJobExecution",
metadata={"value": "param"},
),
execution_id="myexecutionid",
)
print(execution)
executions = clients["metadata"].list_executions(parent=metadata_store_id)
for _execution in executions:
print(_execution)
from google.cloud.aiplatform_v1beta1.types import Context
context = clients["metadata"].create_context(
parent=metadata_store_id,
context=Context(
display_name="my_context",
labels=[{"my_label", "my_value"}],
schema_title="system.Pipeline",
metadata={"param": "value"},
),
context_id="mycontextid",
)
print(context)
contexts = clients["metadata"].list_contexts(parent=metadata_store_id)
for _context in contexts:
print(_context)
from google.cloud.aiplatform_v1beta1.types import Event
clients["metadata"].add_execution_events(
execution=execution.name,
events=[
Event(
artifact=artifact.name,
type_=Event.Type.INPUT,
labels={"my_label": "my_value"},
)
],
)
clients["metadata"].add_context_artifacts_and_executions(
context=context.name, artifacts=[artifact.name], executions=[execution.name]
)
subgraph = clients["metadata"].query_context_lineage_subgraph(context=context.name)
print(subgraph)
clients["metadata"].delete_artifact(name=artifact.name)
clients["metadata"].delete_execution(name=execution.name)
clients["metadata"].delete_context(name=context.name)
from kfp.v2 import compiler, dsl
from kfp.v2.dsl import (Artifact, Dataset, Input, Metrics, Model, Output,
OutputPath, component, pipeline)
@component(
packages_to_install=["google-cloud-bigquery", "pandas", "pyarrow"],
base_image="python:3.9",
output_component_file="create_dataset.yaml",
)
def get_dataframe(bq_table: str, output_data_path: OutputPath("Dataset")):
from google.cloud import bigquery
bqclient = bigquery.Client()
table = bigquery.TableReference.from_string(bq_table)
rows = bqclient.list_rows(table)
dataframe = rows.to_dataframe(
create_bqstorage_client=True,
)
dataframe = dataframe.sample(frac=1, random_state=2)
dataframe.to_csv(output_data_path)
@component(
packages_to_install=["sklearn", "pandas", "joblib"],
base_image="python:3.9",
output_component_file="beans_model_component.yaml",
)
def sklearn_train(
dataset: Input[Dataset], metrics: Output[Metrics], model: Output[Model]
):
import pandas as pd
from joblib import dump
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
df = pd.read_csv(dataset.path)
labels = df.pop("Class").tolist()
data = df.values.tolist()
x_train, x_test, y_train, y_test = train_test_split(data, labels)
skmodel = DecisionTreeClassifier()
skmodel.fit(x_train, y_train)
score = skmodel.score(x_test, y_test)
print("accuracy is:", score)
metrics.log_metric("accuracy", (score * 100.0))
metrics.log_metric("framework", "Scikit Learn")
metrics.log_metric("dataset_size", len(df))
dump(skmodel, model.path + ".joblib")
@component(
packages_to_install=["google-cloud-aiplatform"],
base_image="python:3.9",
output_component_file="beans_deploy_component.yaml",
)
def deploy_model(
model: Input[Model],
project: str,
region: str,
vertex_endpoint: Output[Artifact],
vertex_model: Output[Model],
):
from google.cloud import aiplatform
aiplatform.init(project=project, location=region)
deployed_model = aiplatform.Model.upload(
display_name="beans-model-pipeline",
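# model.uri points at the 'model' output artifact itself; stripping the
# trailing "model" yields the GCS directory containing model.joblib,
# which is what the prebuilt sklearn serving container expects.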
artifact_uri=model.uri.replace("model", ""),
serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-24:latest",
)
endpoint = deployed_model.deploy(machine_type="n1-standard-4")
# Save data to the output params
vertex_endpoint.uri = endpoint.resource_name
vertex_model.uri = deployed_model.resource_name
PIPELINE_ROOT = f"{BUCKET_URI}/pipeline_root/3step"
@dsl.pipeline(
# Default pipeline root. You can override it when submitting the pipeline.
pipeline_root=PIPELINE_ROOT,
# A name for the pipeline.
name="mlmd-pipeline",
)
def pipeline(
bq_table: str = "",
output_data_path: str = "data.csv",
project: str = PROJECT_ID,
region: str = REGION,
):
dataset_task = get_dataframe(bq_table)
model_task = sklearn_train(dataset_task.output)
deploy_model(model=model_task.outputs["model"], project=project, region=region)
NOW = datetime.now().isoformat().replace(".", ":")[:-7]
compiler.Compiler().compile(pipeline_func=pipeline, package_path="mlmd_pipeline.json")
run1 = aip.PipelineJob(
display_name="mlmd-pipeline",
template_path="mlmd_pipeline.json",
job_id="mlmd-pipeline-small-{}".format(TIMESTAMP),
parameter_values={"bq_table": "sara-vertex-demos.beans_demo.small_dataset"},
enable_caching=True,
)
run2 = aip.PipelineJob(
display_name="mlmd-pipeline",
template_path="mlmd_pipeline.json",
job_id="mlmd-pipeline-large-{}".format(TIMESTAMP),
parameter_values={"bq_table": "sara-vertex-demos.beans_demo.large_dataset"},
enable_caching=True,
)
run1.run()
run2.run()
run1.delete()
run2.delete()
! rm -f mlmd_pipeline.json *.yaml
df = aip.get_pipeline_df(pipeline="mlmd-pipeline")
print(df)
import matplotlib.pyplot as plt
plt.plot(df["metric.dataset_size"], df["metric.accuracy"], label="Accuracy")
plt.title("Accuracy and dataset size")
plt.legend(loc=4)
plt.show()
FILTER = f'create_time >= "{NOW}" AND state = LIVE'
artifact_req = {
"parent": metadata_store_id,
"filter": FILTER,
}
artifacts = clients["metadata"].list_artifacts(artifact_req)
for _artifact in artifacts:
print(_artifact)
clients["metadata"].delete_artifact(name=_artifact.name)
clients["metadata"].delete_metadata_store(name=metadata_store_id)
delete_bucket = False
if delete_bucket or os.getenv("IS_TESTING"):
! gsutil rm -r $BUCKET_URI
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Restart the kernel
Step2: Before you begin
Step3: Region
Step4: Timestamp
Step5: Authenticate your Google Cloud account
Step6: Create a Cloud Storage bucket
Step7: Only if your bucket doesn't already exist
Step8: Finally, validate access to your Cloud Storage bucket by examining its contents
Step9: Service Account
Step10: Set service account access for Vertex AI Pipelines
Step11: Set up variables
Step12: Import Vertex AI SDK
Step13: Vertex AI constants
Step14: Set up clients
Step15: Introduction to Vertex AI Metadata
Step16: List metadata schemas
Step17: Create an Artifact resource
Step18: List Artifact resources in a Metadatastore
Step19: Create an Execution resource
Step20: List Execution resources in a Metadatastore
Step21: Create a Context resource
Step22: List Context resources in a Metadatastore
Step23: Add events to Execution resource
Step24: Combine Artifacts and Executions into a Context
Step25: Query a context
Step26: Delete an Artifact resource
Step27: Delete an Execution resource
Step28: Delete a Context resource
Step29: Introduction to tracking ML Metadata in a Vertex AI Pipeline
Step30: Creating a 3-step pipeline with custom components
Step31: Construct and compile the pipeline
Step32: Compile and execute two runs of the pipeline
Step33: Compare the pipeline runs
Step34: Visualize the pipeline runs
Step35: Querying your Metadatastore resource
Step36: Delete a MetadataStore resource
Step37: Cleaning up
|
7,377
|
<ASSISTANT_TASK:>
Python Code:
# remove after testing
%load_ext autoreload
%autoreload 2
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from itertools import product
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
from mclearn.classifier import (train_classifier,
grid_search_logistic,
grid_search_svm_poly,
grid_search_svm_rbf,
learning_curve)
from mclearn.preprocessing import balanced_train_test_split
from mclearn.tools import results_exist, load_results
from mclearn.viz import plot_learning_curve, plot_average_learning_curve, plot_validation_accuracy_heatmap
%matplotlib inline
sns.set_style('ticks')
fig_dir = '../thesis/figures/'
target_col = 'class'
sdss_features = ['psfMag_r_w14', 'psf_u_g_w14', 'psf_g_r_w14', 'psf_r_i_w14',
'psf_i_z_w14', 'petroMag_r_w14', 'petro_u_g_w14', 'petro_g_r_w14',
'petro_r_i_w14', 'petro_i_z_w14', 'petroRad_r']
vstatlas_features = ['rmagC', 'umg', 'gmr', 'rmi', 'imz', 'rmw1', 'w1m2']
sdss = pd.read_hdf('../data/sdss.h5', 'sdss')
vstatlas = pd.read_hdf('../data/vstatlas.h5', 'vstatlas')
X_sdss, _, y_sdss, _ = balanced_train_test_split(
sdss[sdss_features], sdss[target_col], train_size=10000, test_size=0, random_state=2)
X_vstatlas, _, y_vstatlas, _ = balanced_train_test_split(
vstatlas[vstatlas_features], vstatlas[target_col], train_size=2360, test_size=0, random_state=2)
sdss_rbf_path = '../pickle/04_learning_curves/sdss_rbf_scores.pickle'
sdss_rbf_heat = fig_dir + '4_expt1/sdss_grid_rbf.pdf'
sdss_poly_path = '../pickle/04_learning_curves/sdss_poly_scores.pickle'
sdss_poly_heat = fig_dir + '4_expt1/sdss_grid_poly.pdf'
sdss_logistic_path = '../pickle/04_learning_curves/sdss_logistic_scores.pickle'
sdss_logistic_heat = fig_dir + '4_expt1/sdss_grid_logistic.pdf'
sdss_paths = [sdss_rbf_path, sdss_poly_path, sdss_logistic_path]
vstatlas_rbf_path = '../pickle/04_learning_curves/vstatlas_rbf_scores.pickle'
vstatlas_rbf_heat = fig_dir + '4_expt1/vstatlas_grid_rbf.pdf'
vstatlas_poly_path = '../pickle/04_learning_curves/vstatlas_poly_scores.pickle'
vstatlas_poly_heat = fig_dir + '4_expt1/vstatlas_grid_poly.pdf'
vstatlas_logistic_path = '../pickle/04_learning_curves/vstatlas_logistic_scores.pickle'
vstatlas_logistic_heat = fig_dir + '4_expt1/vstatlas_grid_logistic.pdf'
vstatlas_paths = [vstatlas_rbf_path, vstatlas_poly_path, vstatlas_logistic_path]
logistic_labels = ['Degree 1, OVR, L1-norm',
'Degree 1, OVR, L2-norm',
'Degree 1, Multinomial, L2-norm',
'Degree 2, OVR, L1-norm',
'Degree 2, OVR, L2-norm',
'Degree 2, Multinomial, L2-norm',
'Degree 3, OVR, L1-norm',
'Degree 3, OVR, L2-norm',
'Degree 3, Multinomial, L2-norm']
poly_labels = ['Degree 1, OVR, Squared Hinge, L1-norm',
'Degree 1, OVR, Squared Hinge, L2-norm',
'Degree 1, OVR, Hinge, L2-norm',
'Degree 1, Crammer-Singer',
'Degree 2, OVR, Squared Hinge, L1-norm',
'Degree 2, OVR, Squared Hinge, L2-norm',
'Degree 2, OVR, Hinge, L2-norm',
'Degree 2, Crammer-Singer',
'Degree 3, OVR, Squared Hinge, L1-norm',
'Degree 3, OVR, Squared Hinge, L2-norm',
'Degree 3, OVR, Hinge, L2-norm',
'Degree 3, Crammer-Singer']
C_rbf_range = np.logspace(-2, 10, 13)
C_range = np.logspace(-6, 6, 13)
gamma_range = np.logspace(-9, 3, 13)
if not results_exist(sdss_rbf_path):
grid_search_svm_rbf(X_sdss, y_sdss, pickle_path=sdss_rbf_path)
if not results_exist(sdss_poly_path):
grid_search_svm_poly(X_sdss, y_sdss, pickle_path=sdss_poly_path)
if not results_exist(sdss_logistic_path):
grid_search_logistic(X_sdss, y_sdss, pickle_path=sdss_logistic_path)
sdss_rbf, sdss_poly, sdss_logistic = load_results(sdss_paths)
if not results_exist(vstatlas_rbf_path):
grid_search_svm_rbf(X_vstatlas, y_vstatlas, pickle_path=vstatlas_rbf_path)
if not results_exist(vstatlas_poly_path):
grid_search_svm_poly(X_vstatlas, y_vstatlas, pickle_path=vstatlas_poly_path)
if not results_exist(vstatlas_logistic_path):
grid_search_logistic(X_vstatlas, y_vstatlas, pickle_path=vstatlas_logistic_path)
vstatlas_rbf, vstatlas_poly, vstatlas_logistic = load_results(vstatlas_paths)
fig = plt.figure(figsize=(7, 3))
im = plot_validation_accuracy_heatmap(sdss_logistic, x_range=C_range, x_label='$C$', power10='x')
plt.yticks(np.arange(0, 9), logistic_labels)
plt.tick_params(top='off', right='off')
plt.colorbar(im)
fig.savefig(sdss_logistic_heat, bbox_inches='tight')
fig = plt.figure(figsize=(7, 3))
im = plot_validation_accuracy_heatmap(vstatlas_logistic, x_range=C_range, x_label='$C$', power10='x')
plt.yticks(np.arange(0, 9), logistic_labels)
plt.tick_params(top='off', right='off')
plt.colorbar(im)
fig.savefig(vstatlas_logistic_heat, bbox_inches='tight')
fig = plt.figure(figsize=(9, 3.5))
im = plot_validation_accuracy_heatmap(sdss_poly, x_range=C_range, x_label='$C$', power10='x')
plt.yticks(np.arange(0, 12), poly_labels)
plt.tick_params(top='off', right='off')
plt.colorbar(im)
fig.savefig(sdss_poly_heat, bbox_inches='tight')
fig = plt.figure(figsize=(9, 3.5))
im = plot_validation_accuracy_heatmap(vstatlas_poly, x_range=C_range, x_label='$C$', power10='x')
plt.yticks(np.arange(0, 12), poly_labels)
plt.tick_params(top='off', right='off')
plt.colorbar(im)
fig.savefig(vstatlas_poly_heat, bbox_inches='tight')
fig = plt.figure(figsize=(8, 4))
im = plot_validation_accuracy_heatmap(sdss_rbf, x_range=gamma_range,
y_range=C_rbf_range, y_label='$C$', x_label='$\gamma$', power10='both')
plt.tick_params(top='off', right='off')
plt.colorbar(im)
fig.savefig(sdss_rbf_heat, bbox_inches='tight')
fig = plt.figure(figsize=(8, 4))
im = plot_validation_accuracy_heatmap(vstatlas_rbf, x_range=gamma_range,
y_range=C_rbf_range, y_label='$C$', x_label='$\gamma$', power10='both')
plt.tick_params(top='off', right='off')
plt.colorbar(im)
fig.savefig(vstatlas_rbf_heat, bbox_inches='tight')
X = np.asarray(sdss[sdss_features])
y = np.asarray(sdss[target_col])
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=200000, train_size=300001, random_state=29)
rbf = SVC(kernel='rbf', gamma=0.1, C=10, cache_size=2000, class_weight='auto')
poly = LinearSVC(C=0.1, loss='squared_hinge', penalty='l1', dual=False, multi_class='ovr',
fit_intercept=True, class_weight='auto', random_state=21)
logistic = LogisticRegression(penalty='l1', dual=False, C=1, multi_class='ovr', solver='liblinear', class_weight='auto', random_state=21)
forest = RandomForestClassifier(n_estimators=300, n_jobs=-1, class_weight='subsample', random_state=21)
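# Note: class_weight='auto' (and 'subsample' for random forests) are
# legacy option names; current scikit-learn versions spell these
# 'balanced' and 'balanced_subsample' instead.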
classifiers = [forest, logistic, rbf, poly, poly]
degrees = [1, 2, 1, 2, 3]
sample_sizes = np.concatenate((np.arange(100, 1000, 100), np.arange(1000, 10000, 1000),
np.arange(10000, 100001, 10000), [200000, 300000]))
curve_labels = ['Random Forest', 'Logistic Regression', 'RBF SVM', 'Degree 2 Polynomial SVM', 'Degree 3 Polynomial SVM']
pickle_paths = ['../pickle/04_learning_curves/sdss_lc_forest.pickle',
'../pickle/04_learning_curves/sdss_lc_logistic_2.pickle',
'../pickle/04_learning_curves/sdss_lc_rbf.pickle',
'../pickle/04_learning_curves/sdss_lc_poly_2.pickle',
'../pickle/04_learning_curves/sdss_lc_poly_3.pickle']
for classifier, degree, pickle_path in zip(classifiers, degrees, pickle_paths):
if not results_exist(pickle_path):
learning_curve(classifier, X, y, cv, sample_sizes, degree, pickle_path)
all_learning_curves = load_results(pickle_paths)
for c in all_learning_curves:
print(np.array(c)[:, -1])
fig = plt.figure(figsize=(4, 4))
ax = plot_average_learning_curve(sample_sizes, all_learning_curves, curve_labels)
ax.set_xscale('log')
fig.savefig(fig_dir + '4_expt1/sdss_learning_curves.pdf', bbox_inches='tight')
logistic_lc = np.array(all_learning_curves[1])
rbf_lc = np.array(all_learning_curves[2])
print(np.mean(logistic_lc[:,-1]))
print(np.mean(rbf_lc[:,-1]))
X = np.asarray(vstatlas[vstatlas_features])
y = np.asarray(vstatlas[target_col])
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.3, train_size=0.7, random_state=29)
rbf = SVC(kernel='rbf', gamma=0.001, C=1000000, cache_size=2000, class_weight='auto')
poly = LinearSVC(C=1000, multi_class='crammer_singer',
fit_intercept=True, class_weight='auto', random_state=21)
logistic = LogisticRegression(penalty='l1', dual=False, C=100, multi_class='ovr', solver='liblinear', class_weight='auto', random_state=21)
forest = RandomForestClassifier(n_estimators=300, n_jobs=-1, class_weight='subsample', random_state=21)
classifiers = [forest, logistic, rbf, poly]
degrees = [1, 2, 1, 1]
sample_sizes = np.concatenate((np.arange(100, 1000, 100), np.arange(1000, 10000, 1000),
np.arange(10000, 30001, 10000), [35056]))
curve_labels = ['Random Forest', 'Logistic Regression', 'RBF SVM', 'Linear SVM']
pickle_paths = ['../pickle/04_learning_curves/vstatlas_lc_forest.pickle',
'../pickle/04_learning_curves/vstatlas_lc_logistic.pickle',
'../pickle/04_learning_curves/vstatlas_lc_rbf.pickle',
'../pickle/04_learning_curves/vstatlas_lc_poly.pickle']
for classifier, degree, pickle_path in zip(classifiers, degrees, pickle_paths):
if not results_exist(pickle_path):
learning_curve(classifier, X, y, cv, sample_sizes, degree, pickle_path)
all_learning_curves = load_results(pickle_paths)
for c in all_learning_curves:
print(np.array(c)[:, -1])
fig = plt.figure(figsize=(4, 4))
ax = plot_average_learning_curve(sample_sizes, all_learning_curves, curve_labels)
ax.set_xscale('log')
fig.savefig(fig_dir + '4_expt1/vstatlas_learning_curves.pdf', bbox_inches='tight')
logistic_lc = np.array(all_learning_curves[1])
rbf_lc = np.array(all_learning_curves[2])
print(np.max(logistic_lc[:,-1]))
print(np.max(rbf_lc[:,-1]))
transformer = PolynomialFeatures(degree=2, interaction_only=False, include_bias=True)
X_poly = transformer.fit_transform(X)
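# With degree=2 and include_bias=True, PolynomialFeatures expands the 7
# VST ATLAS features into C(9, 2) = 36 columns: 1 bias term, 7 linear
# terms and 28 quadratic/interaction terms.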
%%timeit -n 1 -r 1
rbf = SVC(kernel='rbf', gamma=0.001, C=1000, cache_size=2000, class_weight='auto', probability=True)
rbf.fit(X, y)
%%timeit -n 1 -r 1
rbf = SVC(kernel='rbf', gamma=0.1, C=10, cache_size=2000, class_weight='auto', probability=True)
rbf.fit(X, y)
%%timeit -n 1 -r 1
poly = LinearSVC(C=1000, multi_class='crammer_singer',
fit_intercept=True, class_weight='auto', random_state=21)
poly.fit(X, y)
%%timeit -n 1 -r 1
poly = LinearSVC(C=0.1, loss='squared_hinge', penalty='l1', dual=False, multi_class='ovr',
fit_intercept=True, class_weight='auto', random_state=21)
poly.fit(X_poly, y)
%%timeit -n 1 -r 1
logistic = LogisticRegression(penalty='l1', dual=False, C=100, multi_class='ovr', solver='liblinear', random_state=21)
logistic.fit(X_poly, y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hyperparameter Optimization
Step2: Logistic Regression
Step3: Multinomial has the highest score, but it doesn't give us reliable probability estimates. The next best option for VST ATLAS is {degree=2, multi_class='ovr', penalty='l1', C=100}.
Step4: SVM with Polynomial Kernel
Step5: The best parameters for the VST ATLAS dataset are {degree=1, multi_class='crammer-singer', C=1000}
Step6: SVM with RBF Kernel
Step7: The best one is {C = 1,000,000, gamma=0.001}.
Step8: Learning Curves
Step9: Upper bounds for Logistic Regression and RBF SVM
Step10: VST ATLAS Learning Curves
Step11: Appendix
|
7,378
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
!pip install apache_beam
!pip install 'scikit_learn~=0.23.0' # For gaussian_random_matrix.
!pip install annoy
import os
import sys
import pickle
from collections import namedtuple
from datetime import datetime
import numpy as np
import apache_beam as beam
from apache_beam.transforms import util
import tensorflow as tf
import tensorflow_hub as hub
import annoy
from sklearn.random_projection import gaussian_random_matrix
print('TF version: {}'.format(tf.__version__))
print('TF-Hub version: {}'.format(hub.__version__))
print('Apache Beam version: {}'.format(beam.__version__))
!wget 'https://dataverse.harvard.edu/api/access/datafile/3450625?format=tab&gbrecs=true' -O raw.tsv
!wc -l raw.tsv
!head raw.tsv
!rm -r corpus
!mkdir corpus
with open('corpus/text.txt', 'w') as out_file:
with open('raw.tsv', 'r') as in_file:
for line in in_file:
headline = line.split('\t')[1].strip().strip('"')
out_file.write(headline+"\n")
!tail corpus/text.txt
embed_fn = None
def generate_embeddings(text, model_url, random_projection_matrix=None):
# Beam will run this function in different processes that need to
# import hub and load embed_fn (if not previously loaded)
global embed_fn
if embed_fn is None:
embed_fn = hub.load(model_url)
embedding = embed_fn(text).numpy()
if random_projection_matrix is not None:
embedding = embedding.dot(random_projection_matrix)
return text, embedding
def to_tf_example(entries):
examples = []
text_list, embedding_list = entries
for i in range(len(text_list)):
text = text_list[i]
embedding = embedding_list[i]
features = {
'text': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[text.encode('utf-8')])),
'embedding': tf.train.Feature(
float_list=tf.train.FloatList(value=embedding.tolist()))
}
example = tf.train.Example(
features=tf.train.Features(
feature=features)).SerializeToString(deterministic=True)
examples.append(example)
return examples
def run_hub2emb(args):
'''Runs the embedding generation pipeline'''
options = beam.options.pipeline_options.PipelineOptions(**args)
args = namedtuple("options", args.keys())(*args.values())
with beam.Pipeline(args.runner, options=options) as pipeline:
(
pipeline
| 'Read sentences from files' >> beam.io.ReadFromText(
file_pattern=args.data_dir)
| 'Batch elements' >> util.BatchElements(
min_batch_size=args.batch_size, max_batch_size=args.batch_size)
| 'Generate embeddings' >> beam.Map(
generate_embeddings, args.model_url, args.random_projection_matrix)
| 'Encode to tf example' >> beam.FlatMap(to_tf_example)
| 'Write to TFRecords files' >> beam.io.WriteToTFRecord(
file_path_prefix='{}/emb'.format(args.output_dir),
file_name_suffix='.tfrecords')
)
def generate_random_projection_weights(original_dim, projected_dim):
random_projection_matrix = None
random_projection_matrix = gaussian_random_matrix(
n_components=projected_dim, n_features=original_dim).T
print("A Gaussian random weight matrix was creates with shape of {}".format(random_projection_matrix.shape))
print('Storing random projection matrix to disk...')
with open('random_projection_matrix', 'wb') as handle:
pickle.dump(random_projection_matrix,
handle, protocol=pickle.HIGHEST_PROTOCOL)
return random_projection_matrix
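# gaussian_random_matrix is deprecated in scikit-learn releases after the
# pinned 0.23 (see the install cell above). An equivalent sketch using
# NumPy directly draws each entry from N(0, 1/projected_dim):
# random_projection_matrix = np.random.normal(
#     loc=0.0, scale=1.0 / np.sqrt(projected_dim),
#     size=(original_dim, projected_dim))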
model_url = 'https://tfhub.dev/google/nnlm-en-dim128/2' #@param {type:"string"}
projected_dim = 64 #@param {type:"number"}
import tempfile
output_dir = tempfile.mkdtemp()
original_dim = hub.load(model_url)(['']).shape[1]
random_projection_matrix = None
if projected_dim:
random_projection_matrix = generate_random_projection_weights(
original_dim, projected_dim)
args = {
'job_name': 'hub2emb-{}'.format(datetime.utcnow().strftime('%y%m%d-%H%M%S')),
'runner': 'DirectRunner',
'batch_size': 1024,
'data_dir': 'corpus/*.txt',
'output_dir': output_dir,
'model_url': model_url,
'random_projection_matrix': random_projection_matrix,
}
print("Pipeline args are set.")
args
print("Running pipeline...")
%time run_hub2emb(args)
print("Pipeline is done.")
!ls {output_dir}
embed_file = os.path.join(output_dir, 'emb-00000-of-00001.tfrecords')
sample = 5
# Create a description of the features.
feature_description = {
'text': tf.io.FixedLenFeature([], tf.string),
'embedding': tf.io.FixedLenFeature([projected_dim], tf.float32)
}
def _parse_example(example):
# Parse the input `tf.Example` proto using the dictionary above.
return tf.io.parse_single_example(example, feature_description)
dataset = tf.data.TFRecordDataset(embed_file)
for record in dataset.take(sample).map(_parse_example):
print("{}: {}".format(record['text'].numpy().decode('utf-8'), record['embedding'].numpy()[:10]))
def build_index(embedding_files_pattern, index_filename, vector_length,
metric='angular', num_trees=100):
'''Builds an ANNOY index'''
annoy_index = annoy.AnnoyIndex(vector_length, metric=metric)
# Mapping between the item and its identifier in the index
mapping = {}
embed_files = tf.io.gfile.glob(embedding_files_pattern)
num_files = len(embed_files)
print('Found {} embedding file(s).'.format(num_files))
item_counter = 0
for i, embed_file in enumerate(embed_files):
print('Loading embeddings in file {} of {}...'.format(i+1, num_files))
dataset = tf.data.TFRecordDataset(embed_file)
for record in dataset.map(_parse_example):
text = record['text'].numpy().decode("utf-8")
embedding = record['embedding'].numpy()
mapping[item_counter] = text
annoy_index.add_item(item_counter, embedding)
item_counter += 1
if item_counter % 100000 == 0:
print('{} items loaded to the index'.format(item_counter))
print('A total of {} items added to the index'.format(item_counter))
print('Building the index with {} trees...'.format(num_trees))
annoy_index.build(n_trees=num_trees)
print('Index is successfully built.')
print('Saving index to disk...')
annoy_index.save(index_filename)
print('Index is saved to disk.')
print("Index file size: {} GB".format(
round(os.path.getsize(index_filename) / float(1024 ** 3), 2)))
annoy_index.unload()
print('Saving mapping to disk...')
with open(index_filename + '.mapping', 'wb') as handle:
pickle.dump(mapping, handle, protocol=pickle.HIGHEST_PROTOCOL)
print('Mapping is saved to disk.')
print("Mapping file size: {} MB".format(
round(os.path.getsize(index_filename + '.mapping') / float(1024 ** 2), 2)))
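# num_trees trades index build time and size for recall: more trees make
# get_nns_by_vector return more accurate neighbors at query time; 100 is
# a reasonable middle ground for a corpus of this size.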
embedding_files = "{}/emb-*.tfrecords".format(output_dir)
embedding_dimension = projected_dim
index_filename = "index"
!rm -f {index_filename}
!rm -f {index_filename}.mapping
%time build_index(embedding_files, index_filename, embedding_dimension)
!ls
index = annoy.AnnoyIndex(embedding_dimension, 'angular')  # metric must match the one used at build time
index.load(index_filename, prefault=True)
print('Annoy index is loaded.')
with open(index_filename + '.mapping', 'rb') as handle:
mapping = pickle.load(handle)
print('Mapping file is loaded.')
def find_similar_items(embedding, num_matches=5):
'''Finds similar items to a given embedding in the ANN index'''
ids = index.get_nns_by_vector(
embedding, num_matches, search_k=-1, include_distances=False)
items = [mapping[i] for i in ids]
return items
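# search_k=-1 lets Annoy use its default search budget (roughly
# n_trees * num_matches nodes inspected); raising it improves recall at
# the cost of query latency.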
# Load the TF-Hub model
print("Loading the TF-Hub model...")
%time embed_fn = hub.load(model_url)
print("TF-Hub model is loaded.")
random_projection_matrix = None
if os.path.exists('random_projection_matrix'):
print("Loading random projection matrix...")
with open('random_projection_matrix', 'rb') as handle:
random_projection_matrix = pickle.load(handle)
print('random projection matrix is loaded.')
def extract_embeddings(query):
'''Generates the embedding for the query'''
query_embedding = embed_fn([query])[0].numpy()
if random_projection_matrix is not None:
query_embedding = query_embedding.dot(random_projection_matrix)
return query_embedding
extract_embeddings("Hello Machine Learning!")[:10]
#@title { run: "auto" }
query = "confronting global challenges" #@param {type:"string"}
print("Generating embedding for the query...")
%time query_embedding = extract_embeddings(query)
print("")
print("Finding relevant items in the index...")
%time items = find_similar_items(query_embedding, 10)
print("")
print("Results:")
print("=========")
for item in items:
print(item)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Semantic Search with Approximate Nearest Neighbors and Text Embeddings
Step2: Import the required libraries
Step3: 1. Download Sample Data
Step4: For simplicity, we only keep the headline text and remove the publication date
Step5: 2. Generate Embeddings for the Data.
Step6: Convert to tf.Example method
Step7: Beam pipeline
Step8: Generating Random Projection Weight Matrix
Step9: Set parameters
Step10: Run pipeline
Step11: Read some of the generated embeddings...
Step12: 3. Build the ANN Index for the Embeddings
Step13: 4. Use the Index for Similarity Matching
Step14: Similarity matching method
Step15: Extract embedding from a given query
Step16: Enter a query to find the most similar items
|
7,379
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
sns.set_style('white')
from scipy.interpolate import griddata
# YOUR CODE HERE
#raise NotImplementedError()
#I worked with James Amarel
x=np.empty((1,))
x[0]=0
y=np.empty((1,))
y[0]=0
#hstack acts like appending but for arrays (:
for i in range(-4,5):
x=np.hstack((x,(i,i)))
x=np.hstack((x,np.array([-5]*11)))
x=np.hstack((x,np.array([5]*11)))
y=np.hstack((y,np.array([-5,5]*9)))
for i in range(-5,6):
y=np.hstack((y,(i)))
for i in range(-5,6):
y=np.hstack((y,(i)))
f=np.zeros_like(x)
f[0]=1
plt.scatter(x, y);
assert x.shape==(41,)
assert y.shape==(41,)
assert f.shape==(41,)
assert np.count_nonzero(f)==1
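# An equivalent, more compact construction of the same 41 points
# (a sketch only; the asserts above check shapes and f, not ordering):
# side = np.arange(-5, 6)
# x_alt = np.concatenate(([0], np.repeat(np.arange(-4, 5), 2), [-5]*11, [5]*11))
# y_alt = np.concatenate(([0], np.tile([-5, 5], 9), side, side))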
# YOUR CODE HERE
#raise NotImplementedError()
#create arrays of the right size and turn them into a meshgrid
xnew = np.linspace(-5,5,100)
ynew=np.linspace(-5,5,100)
Xnew, Ynew = np.meshgrid(xnew,ynew)
#uses griddata to interpolate the 2D data in meshgrid form
#I helped Orion with this part
Fnew = griddata((x,y), f, (Xnew,Ynew), method = 'cubic', fill_value=0.0)
assert xnew.shape==(100,)
assert ynew.shape==(100,)
assert Xnew.shape==(100,100)
assert Ynew.shape==(100,100)
assert Fnew.shape==(100,100)
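# griddata supports 'nearest', 'linear' and 'cubic' interpolation for
# scattered 2-D data; query points outside the convex hull of the input
# points receive fill_value (0.0 here).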
# YOUR CODE HERE
#raise NotImplementedError()
plt.contourf(Xnew, Ynew, Fnew, cmap="gist_ncar")
plt.colorbar(shrink=0.7)
plt.box(False)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Scalar Field, $f(x,y)$');
assert True # leave this to grade the plot
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sparse 2d interpolation
Step2: The following plot should show the points on the boundary and the single point in the interior
Step3: Use meshgrid and griddata to interpolate the function $f(x,y)$ on the entire square domain
Step4: Plot the values of the interpolated scalar field using a contour plot. Customize your plot to make it effective and beautiful.
|
7,380
|
<ASSISTANT_TASK:>
Python Code:
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
import pandas as pd
#I use this dataset because it has clearly separated categories.
#Read the dataset using pandas.
#Note that malformed lines are skipped with on_bad_lines='skip'
#(pandas >= 1.3; this replaced the older error_bad_lines=False).
df = pd.read_csv('https://archive.ics.uci.edu/ml/'
'machine-learning-databases/00236/seeds_dataset.txt', header=None, sep="\t", on_bad_lines='skip')
#The headers are not given in the dataset, so we give them afterwords:
#1. area A,
#2. perimeter P,
#3. compactness C = 4*pi*A/P^2,
#4. length of kernel,
#5. width of kernel,
#6. asymmetry coefficient
#7. length of kernel groove.
#8. Class: 1=Kama, 2=Rosa, 3=Canadian
df.columns = ["area","perimeter","compactness","kernel-length","kernel-width",
"asymmetry","kernel-groove-length","class"]
#This shows the header of the database:
df.head()
#The database contains 3 classes of seeds,
#and sklearn classifiers handle multiple classes natively.
import numpy as np
#Extract the class labels (1=Kama, 2=Rosa, 3=Canadian):
y = df.loc[:,'class']
#Extract all cathegories:
X=df.iloc[:,0:7]
#Convert the dataframe into a numpy array to standardize later
#(.to_numpy() replaces the removed DataFrame.as_matrix()):
X = X.to_numpy()
nfeature=X.shape[1]
# standardize features
X_std = np.copy(X)
for ifeat in range(0,nfeature):
X_std[:,ifeat] = (X[:,ifeat] - X[:,ifeat].mean()) / X[:,ifeat].std()
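# Equivalent one-liner (a sketch using sklearn's StandardScaler, imported
# above, which applies the same per-feature standardization):
# X_std = StandardScaler().fit_transform(X)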
#Since we have many features, we just plot the learning curves for the training and cross-validation sets.
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVC, Poly kernel, $\gamma=0.001$)"
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = SVC(kernel='poly',gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVC, RBF kernel, $\gamma=0.001$)"
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = SVC(kernel='rbf',gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (Linear SVC)"
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = svm.LinearSVC(C=1.0)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot learning curves of different classifiers
Step2: Pandas
Step3: Testing sklearn classifiers.
|
7,381
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cas', 'fgoals-g3', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
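# For example (hypothetical placeholder text, not official model
# documentation):
# DOC.set_value("Top-level overview of the coupled model configuration.")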
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
7,382
|
<ASSISTANT_TASK:>
Python Code:
%xmode Minimal
from larray import *
# define some scalars, axes and arrays
variant = 'baseline'
country = Axis('country=Belgium,France,Germany')
gender = Axis('gender=Male,Female')
time = Axis('time=2013..2017')
population = zeros([country, gender, time])
births = zeros([country, gender, time])
deaths = zeros([country, gender, time])
# create an empty session, then add objects to it one by one
s = Session()
s.variant = variant
s.country = country
s.gender = gender
s.time = time
s.population = population
s.births = births
s.deaths = deaths
print(s.summary())
# or create a session in one step by passing all objects to the constructor
s = Session(variant=variant, country=country, gender=gender, time=time,
population=population, births=births, deaths=deaths)
print(s.summary())
class Demography(CheckedSession):
# (convention is to declare parameters (read-only objects) in capital letters)
# Declare the 'VARIANT' parameter as type string.
# 'VARIANT' will be initialized when a 'Demography' session is created
VARIANT: str
# declare variables with an initialization value.
# Their type is deduced from their initialization value.
COUNTRY = Axis('country=Belgium,France,Germany')
GENDER = Axis('gender=Male,Female')
TIME = Axis('time=2013..2017')
population = zeros([COUNTRY, GENDER, TIME], dtype=int)
births = zeros([COUNTRY, GENDER, TIME], dtype=int)
# declare 'deaths' with constrained axes and dtype.
# Its type (Array), axes and dtype are not modifiable.
# It will be initialized with 0
deaths: CheckedArray([COUNTRY, GENDER, TIME], int) = 0
d = Demography(VARIANT='baseline')
print(d.summary())
# create a new Session object and load all arrays, axes, groups and metadata
# from all CSV files located in the passed directory
csv_dir = get_example_filepath('demography_eurostat')
s = Session(csv_dir)
# create a new Session object and load all arrays, axes, groups and metadata
# stored in the passed Excel file
filepath_excel = get_example_filepath('demography_eurostat.xlsx')
s = Session(filepath_excel)
# create a new Session object and load all arrays, axes, groups and metadata
# stored in the passed HDF5 file
filepath_hdf = get_example_filepath('demography_eurostat.h5')
s = Session(filepath_hdf)
print(s.summary())
# create a session containing 3 axes, 2 groups and one array 'population'
filepath = get_example_filepath('population_only.xlsx')
s = Session(filepath)
print(s.summary())
# call the load method on the previous session and add the 'births' and 'deaths' arrays to it
filepath = get_example_filepath('births_and_deaths.xlsx')
s.load(filepath)
print(s.summary())
births_and_deaths_session = Session()
# use the names argument to only load births and deaths arrays
births_and_deaths_session.load(filepath_hdf, names=['births', 'deaths'])
print(births_and_deaths_session.summary())
s = Session()
# with display=True, the load method will print a message
# each time a new item is loaded
s.load(filepath_hdf, display=True)
# save items of a session in CSV files.
# Here, the save method will create a 'demography' directory in which CSV files will be written
s.save('demography')
# save the session to an HDF5 file
s.save('demography.h5')
# save the session to an Excel file
s.save('demography.xlsx')
# use the names argument to only save births and deaths arrays
s.save('demography.h5', names=['births', 'deaths'])
# load session saved in 'demography.h5' to see its content
Session('demography.h5').names
population = read_csv('./demography/population.csv')
pop_ses = Session([('population', population)])
# by setting overwrite to False, the destination file is updated instead of overwritten.
# The items already stored in the file but not present in the session are left intact.
# Conversely, items that exist in both the file and the session are completely overwritten.
pop_ses.save('demography.h5', overwrite=False)
# load session saved in 'demography.h5' to see its content
Session('demography.h5').names
# with display=True, the save method will print a message
# each time an item is dumped
s.save('demography.h5', display=True)
# load a session representing the results of a demographic model
filepath_hdf = get_example_filepath('demography_eurostat.h5')
s = Session(filepath_hdf)
# print the content of the session
print(s.names)
# print a more detailed summary of the session
print(s.summary())
s['population']
s.population
s_selected = s['population', 'births', 'deaths']
s_selected.names
d_selected = d['births', 'deaths']
# test if d_selected is still a checked-session
print('is still a checked-session?', isinstance(d_selected, CheckedSession))
# test if d_selected is a normal session
print('is now a normal session?', isinstance(d_selected, Session))
# select only arrays of a session
s.filter(kind=Array)
# select all items with a name starting with a letter between a and k
s.filter(pattern='[a-k]*')
d_filtered = d.filter(pattern='[a-k]*')
# test if d_filtered is still a checked-session
print('is still a checked-session?', isinstance(d_filtered, CheckedSession))
# test if d_filtered is a normal session
print('is now a normal session?', isinstance(d_filtered, Session))
# iterate over item names
for key in s.keys():
print(key)
# iterate over items
for value in s.values():
if isinstance(value, Array):
print(value.info)
else:
print(repr(value))
print()
# iterate over names and items
for key, value in s.items():
if isinstance(value, Array):
print(key, ':')
print(value.info)
else:
print(key, ':', repr(value))
print()
class Demography(CheckedSession):
COUNTRY = Axis('country=Belgium,France,Germany')
GENDER = Axis('gender=Male,Female')
TIME = Axis('time=2013..2017')
population = zeros([COUNTRY, GENDER, TIME], dtype=int)
# declare the deaths array with constrained axes and dtype
deaths: CheckedArray([COUNTRY, GENDER, TIME], int) = 0
d = Demography()
print(d.summary())
# The population variable was initialized with the zeros() function which returns an Array object.
# The declared type of the population variable is Array and is protected
d.population = Axis('population=child,teenager,adult,elderly')
AGE = Axis('age=0..100')
d.deaths = zeros([d.COUNTRY, AGE, d.GENDER, d.TIME])
d.deaths = 1.2
d.deaths
d.deaths = 'undead'
# misspell population (forgot the 'a')
d.popultion = 0
# get population, births and deaths in millions
s_div = s / 1e6
s_div.population
from larray import random
random_increment = random.choice([-1, 0, 1], p=[0.3, 0.4, 0.3], axes=s.population.axes) * 1000
random_increment
# add a common array to some variables of the session
s_rand = s['population', 'births', 'deaths'] + random_increment
s_rand.population
# compute the difference between each array of the two sessions
s_diff = s - s_rand
s_diff.births
# add the next year to all arrays
def add_next_year(array):
if 'time' in array.axes.names:
last_year = array.time.i[-1]
return array.append('time', 0, last_year + 1)
else:
return array
s_with_next_year = s.apply(add_next_year)
print('population array before calling apply:')
print(s.population)
print()
print('population array after calling apply:')
print(s_with_next_year.population)
# add the next year to all arrays.
# Use the 'copy_values_from_last_year' flag to indicate
# whether to copy values from the last year
def add_next_year(array, copy_values_from_last_year):
if 'time' in array.axes.names:
last_year = array.time.i[-1]
value = array[last_year] if copy_values_from_last_year else 0
return array.append('time', value, last_year + 1)
else:
return array
s_with_next_year = s.apply(add_next_year, True)
print('population array before calling apply:')
print(s.population)
print()
print('population array after calling apply:')
print(s_with_next_year.population)
# load a session representing the results of a demographic model
filepath_hdf = get_example_filepath('demography_eurostat.h5')
s = Session(filepath_hdf)
# create a copy of the original session
s_copy = s.copy()
# 'element_equals' compares arrays one by one
s.element_equals(s_copy)
# 'equals' returns True if the two sessions have exactly the same items
s.equals(s_copy)
# slightly modify the 'population' array for some label combinations
s_copy.population += random_increment
# the 'population' array is different between the two sessions
s.element_equals(s_copy)
# 'equals' returns False if at least one item of the two sessions differs in values or axes
s.equals(s_copy)
# reset the 'copy' session as a copy of the original session
s_copy = s.copy()
# add an array to the 'copy' session
s_copy.gender_ratio = s_copy.population.ratio('gender')
# the 'gender_ratio' array is not present in the original session
s.element_equals(s_copy)
# 'equals' returns False if at least one item is missing from one of the two sessions
s.equals(s_copy)
# reset the 'copy' session as a copy of the original session
s_copy = s.copy()
# slightly modify the 'population' array for some label combinations
s_copy.population += random_increment
s_check_same_values = s == s_copy
s_check_same_values.population
s_check_same_values.time
s_check_different_values = s != s_copy
s_check_different_values.population
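# A small follow-up sketch, assuming larray Arrays support the usual all()/any()
# aggregations: reduce the element-wise comparisons to a single flag per array.
print('all population values equal? ', s_check_same_values.population.all())
print('any population value different?', s_check_different_values.population.any())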
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Three Kinds Of Sessions
Step2: CheckedSession
Step3: Loading and Dumping Sessions
Step4: 2) Call the load method on an existing session and pass the path to the Excel/HDF5 file or to the directory containing CSV files as first argument
Step5: The load method offers some options
Step6: 2) With display=True, the load method will print a message each time a new item is loaded
Step7: Dumping Sessions (CSV, Excel, HDF5)
Step8: <div class="alert alert-info">
Step9: 2) By default, dumping a session to an Excel or HDF5 file will overwrite it. By setting the overwrite argument to False, you can choose to update the existing Excel or HDF5 file
Step10: 3) With display=True, the save method will print a message each time an item is dumped
Step11: Exploring Content
Step12: To get more information of items of a session, the summary will provide not only the names of items but also the list of labels in the case of axes or groups and the list of axes, the shape and the dtype in the case of arrays
Step13: Selecting And Filtering Items
Step14: A simpler way consists in the use the syntax <session_var>.<item_name>
Step15: <div class="alert alert-warning">
Step16: <div class="alert alert-warning">
Step17: The filter method allows you to select all items of the same kind (i.e. all axes, or groups or arrays) or all items with names satisfying a given pattern
Step18: <div class="alert alert-warning">
Step19: Iterating over Items
Step20: Manipulating Checked Sessions
Step21: One of the specificities of checked-sessions is that the type of the contained objects is protected (it cannot change). Any attempt to assign a value of a different type will raise an error
Step22: The deaths array has been declared as a CheckedArray.
Step23: The deaths array is also constrained by its declared dtype int. This means that if you try to assign a value of type float instead of int, the value will be converted to int if possible
Step24: or raise an error
Step25: It is possible to add a new variable after the checked-session has been initialized but in that case, a warning message is printed (in case you misspelled the name of a variable while trying to modify it)
Step26: Arithmetic Operations On Sessions
Step27: with an array (please read the documentation of the random.choice function first if you are not familiar with it)
Step28: with another session
Step29: Applying Functions On All Arrays
Step30: It is possible to pass a function with additional arguments
Step31: It is also possible to apply a function on non-Array objects of a session. Please refer to the documentation of the apply method.
Step32: The == operator returns a new session of boolean arrays, with elements compared element-wise
Step33: This also works for axes and groups
Step34: The != operator does the opposite of the == operator
|
7,383
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import seaborn as sns
from scipy import stats
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.decomposition import PCA
from sklearn.metrics import r2_score, mean_absolute_error
from sklearn.model_selection import train_test_split
%matplotlib inline
abaloneDF = pd.read_csv('abalone.csv', names=['Sex', 'Length', 'Diameter', 'Height',
'Whole Weight', 'Shucked Weight',
'Viscera Weight', 'Shell Weight',
'Rings'])
abaloneDF['Male'] = (abaloneDF['Sex'] == 'M').astype(int)
abaloneDF['Female'] = (abaloneDF['Sex'] == 'F').astype(int)
abaloneDF['Infant'] = (abaloneDF['Sex'] == 'I').astype(int)
abaloneDF = abaloneDF[abaloneDF['Height'] > 0]
dataset = abaloneDF.drop(['Rings', 'Sex'],axis=1)
#We will start with the same number of components as variables
pca_model = PCA(n_components=10)
pca_model.fit(dataset)
#Plot the explained variance
plt.plot(range(1,11),pca_model.explained_variance_ratio_);
plt.xlabel('Principal Component');
plt.ylabel('Percentage Explained Variance');
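# To make the cut-off choice explicit, print the cumulative explained variance
# as well (a standard complement to the scree plot above):
print(np.cumsum(pca_model.explained_variance_ratio_))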
#Transpose so rows are the variables and columns are the principal components
df = pd.DataFrame(data=pca_model.components_.T)
df.index = dataset.columns
#Keep the first four principal components (.ix is deprecated, use .iloc)
dfRed = df.iloc[:, 0:4]
dfRed.columns = range(1,5)
dfRed.plot.bar();
plt.ylabel('Coefficient');
#Remove the last 6 PCs
red_PCA = PCA(n_components=4)
red_PCA.fit(dataset)
rings = abaloneDF['Rings'].values.reshape(len(abaloneDF),1)
red_data = np.hstack([red_PCA.transform(dataset),rings])
red_df = pd.DataFrame(red_data,columns=['PC1','PC2','PC3','PC4','Rings'])
train, test = train_test_split(red_df,train_size=0.7)
xtrain = train.drop(['Rings'],axis=1)
ytrain = train['Rings']
xtest = test.drop(['Rings'],axis=1)
ytest = test['Rings']
regr = linear_model.LinearRegression()
regr.fit(xtrain, ytrain)
#Take a look at the regression coefficients
dict(zip(list(xtrain.columns),regr.coef_))
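# For completeness, inspect the fitted intercept as well
# (intercept_ is a standard attribute of sklearn's LinearRegression):
print('Intercept:', regr.intercept_)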
#Same function as in Part 1:
def plot_yyhat(ytest,ypred):
r2 = r2_score(ytest, ypred )
mae = mean_absolute_error(ytest, ypred)
absmin = min([ytest.min(),ypred.min()])
absmax = max([ytest.max(),ypred.max()])
ax = plt.axes()
ax.scatter(ytest,ypred)
ax.set_title('Y vs. YHat')
ax.axis([absmin, absmax, absmin, absmax])
ax.plot([absmin, absmax], [absmin, absmax],c="k")
ax.set_ylabel('Predicted Rings')
ax.set_xlabel('Actual Rings')
#Plot the text box
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
textStr = '$MAE=%.3f$\n$R^2=%.3f$' % (mae, r2)
ax.text(0.05, 0.95, textStr, transform=ax.transAxes, fontsize=14,
verticalalignment='top', bbox=props);
ypred = regr.predict(xtest)
plot_yyhat(ytest,ypred)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: PCA Modeling
Step2: According to the figure above, the majority of the variance within the model can be explained using only the first four principal components. Since we know that most of these variables are highly correlated, a good assumption is that PCs 5 through 10 contain mostly noise and can be removed from consideration.
Step3: Regression Modeling
Step4: Again, let's get a sense of how well the model performed by looking at a Y-Yhat plot and some basic performance metrics
|
7,384
|
<ASSISTANT_TASK:>
Python Code:
# Imports assumed by the cells below (this entry's import cell was not captured):
import os
import numpy as np
import six
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import autograph
from google.colab import widgets  # TabBar, used by the interactive demo below
def parse(line):
"""Parses a line from the colors dataset."""
items = tf.string_split([line], ",").values
rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.0
color_name = items[0]
chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)
length = tf.cast(tf.shape(chars)[0], dtype=tf.int64)
return rgb, chars, length
def set_static_batch_shape(batch_size):
def apply(rgb, chars, length):
rgb.set_shape((batch_size, None))
chars.set_shape((batch_size, None, 256))
length.set_shape((batch_size,))
return rgb, chars, length
return apply
def load_dataset(data_dir, url, batch_size, training=True):
"""Loads the colors data at path into a padded tf.data.Dataset."""
path = tf.keras.utils.get_file(os.path.basename(url), url, cache_dir=data_dir)
dataset = tf.data.TextLineDataset(path)
dataset = dataset.skip(1)
dataset = dataset.map(parse)
dataset = dataset.cache()
dataset = dataset.repeat()
if training:
dataset = dataset.shuffle(buffer_size=3000)
dataset = dataset.padded_batch(
batch_size, padded_shapes=((None,), (None, 256), ()))
# To simplify the model code, we statically set as many of the shapes that we
# know.
dataset = dataset.map(set_static_batch_shape(batch_size))
return dataset
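# Optional sanity check (illustrative sketch; assumes TF graph mode, so tensors
# must be evaluated inside a tf.Session - train_url is defined further below).
# Uncomment to inspect the shapes of one padded batch:
#     ds = load_dataset("tmp/rnn/data", train_url, batch_size=4)
#     rgb, chars, length = ds.make_one_shot_iterator().get_next()
#     with tf.Session() as sess:
#         print(*(t.shape for t in sess.run((rgb, chars, length))))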
@autograph.convert()
class RnnColorbot(tf.keras.Model):
"""RNN Colorbot model."""
def __init__(self):
super(RnnColorbot, self).__init__()
self.lower_cell = tf.contrib.rnn.LSTMBlockCell(256)
self.upper_cell = tf.contrib.rnn.LSTMBlockCell(128)
self.relu_layer = tf.layers.Dense(3, activation=tf.nn.relu)
def _rnn_layer(self, chars, cell, batch_size, training):
"""A single RNN layer.
Args:
chars: A Tensor of shape (max_sequence_length, batch_size, input_size)
cell: An object of type tf.contrib.rnn.LSTMBlockCell
batch_size: Int, the batch size to use
training: Boolean, whether the layer is used for training
Returns:
A Tensor of shape (max_sequence_length, batch_size, output_size).
"""
hidden_outputs = []
autograph.utils.set_element_type(hidden_outputs, tf.float32)
state, output = cell.zero_state(batch_size, tf.float32)
for ch in chars:
cell_output, (state, output) = cell.call(ch, (state, output))
hidden_outputs.append(cell_output)
hidden_outputs = hidden_outputs.stack()
if training:
hidden_outputs = tf.nn.dropout(hidden_outputs, 0.5)
return hidden_outputs
def build(self, _):
"""Creates the model variables. See keras.Model.build()."""
self.lower_cell.build(tf.TensorShape((None, 256)))
self.upper_cell.build(tf.TensorShape((None, 256)))
self.relu_layer.build(tf.TensorShape((None, 128)))
self.built = True
def call(self, inputs, training=False):
"""The RNN model code. Uses Eager and AutoGraph.
The model consists of two RNN layers (made by lower_cell and upper_cell),
followed by a fully connected layer with ReLU activation.
Args:
inputs: A tuple (chars, length)
training: Boolean, whether the layer is used for training
Returns:
A Tensor of shape (batch_size, 3) - the model predictions.
"""
chars, length = inputs
batch_size = chars.shape[0]
seq = tf.transpose(chars, (1, 0, 2))
seq = self._rnn_layer(seq, self.lower_cell, batch_size, training)
seq = self._rnn_layer(seq, self.upper_cell, batch_size, training)
# Grab just the end-of-sequence from each output.
indices = tf.stack([length - 1, range(batch_size)], axis=1)
sequence_ends = tf.gather_nd(seq, indices)
return self.relu_layer(sequence_ends)
@autograph.convert()
def loss_fn(labels, predictions):
return tf.reduce_mean((predictions - labels) ** 2)
def model_fn(features, labels, mode, params):
"""Estimator model function."""
chars = features['chars']
sequence_length = features['sequence_length']
inputs = (chars, sequence_length)
# Create the model. Simply using the AutoGraph-ed class just works!
colorbot = RnnColorbot()
colorbot.build(None)
if mode == tf.estimator.ModeKeys.TRAIN:
predictions = colorbot(inputs, training=True)
loss = loss_fn(labels, predictions)
learning_rate = params['learning_rate']
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
global_step = tf.train.get_global_step()
train_op = optimizer.minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
predictions = colorbot(inputs)
loss = loss_fn(labels, predictions)
return tf.estimator.EstimatorSpec(mode, loss=loss)
elif mode == tf.estimator.ModeKeys.PREDICT:
predictions = colorbot(inputs)
predictions = tf.minimum(predictions, 1.0)
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
def input_fn(data_dir, data_url, params, training=True):
"""An input function for training."""
batch_size = params['batch_size']
# load_dataset defined above
dataset = load_dataset(data_dir, data_url, batch_size, training=training)
# Package the pipeline end in a format suitable for the estimator.
labels, chars, sequence_length = dataset.make_one_shot_iterator().get_next()
features = {
'chars': chars,
'sequence_length': sequence_length
}
return features, labels
params = {
'batch_size': 64,
'learning_rate': 0.01,
}
train_url = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/train.csv"
test_url = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/test.csv"
data_dir = "tmp/rnn/data"
regressor = tf.estimator.Estimator(
model_fn=model_fn,
params=params)
regressor.train(
input_fn=lambda: input_fn(data_dir, train_url, params),
steps=100)
eval_results = regressor.evaluate(
input_fn=lambda: input_fn(data_dir, test_url, params, training=False),
steps=2
)
print('Eval loss at step %d: %s' % (eval_results['global_step'], eval_results['loss']))
def predict_input_fn(color_name):
"""An input function for prediction."""
_, chars, sequence_length = parse(color_name)
# We create a batch of a single element.
features = {
'chars': tf.expand_dims(chars, 0),
'sequence_length': tf.expand_dims(sequence_length, 0)
}
return features, None
def draw_prediction(color_name, pred):
pred = pred * 255
pred = pred.astype(np.uint8)
plt.axis('off')
plt.imshow(pred)
plt.title(color_name)
plt.show()
def predict_with_estimator(color_name, regressor):
predictions = regressor.predict(
input_fn=lambda:predict_input_fn(color_name))
pred = next(predictions)
predictions.close()
pred = np.minimum(pred, 1.0)
pred = np.expand_dims(np.expand_dims(pred, 0), 0)
draw_prediction(color_name, pred)
tb = widgets.TabBar(["RNN Colorbot"])
while True:
with tb.output_to(0):
try:
color_name = six.moves.input("Give me a color name (or press 'enter' to exit): ")
except (EOFError, KeyboardInterrupt):
break
if not color_name:
break
with tb.output_to(0):
tb.clear_tab()
predict_with_estimator(color_name, regressor)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Case study
Step7: To show the use of control flow, we write the RNN loop by hand, rather than using a pre-built RNN model.
Step9: We will now create the model function for the custom Estimator.
Step11: We'll create an input function that will feed our training and eval data.
Step12: We now have everything in place to build our custom estimator and use it for training and eval!
Step14: And here's the same estimator used for inference.
|
7,385
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ec-earth-consortium', 'ec-earth3-lr', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
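# Illustrative placeholder only - real author details would go here, e.g.:
#     DOC.set_author("Jane Doe", "jane.doe@example.org")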
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
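# Illustrative example only: boolean properties take an unquoted Python bool, e.g.
#     DOC.set_value(True)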
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil Layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
7,386
|
<ASSISTANT_TASK:>
Python Code:
Initialization
'''
Standard modules
'''
import os
import pickle
import csv
import time
from pprint import pprint
import json
import pymongo
import multiprocessing
import logging
import collections
'''
Analysis modules
'''
%matplotlib inline
%config InlineBackend.figure_format = 'retina' # render double resolution plot output for Retina screens
import matplotlib.pyplot as plt
import pandas as pd
'''
Custom modules
'''
import config
import utilities
import mongodb
import multiprocessing_workers
'''
R magic and packages
'''
# hide all RRuntimeWarnings
import warnings
warnings.filterwarnings('ignore')
# add home for R in anaconda on PATH sys env
os.environ['PATH'] += ':/opt/anaconda3/bin'
# load R magic
%load_ext rpy2.ipython
# load R packages
%R require(ggplot2)
'''
Misc
'''
nb_name = '20171024-daheng-prepare_ibm_tweets_news_data'
# all tweets with keyword 'ibm' in tweet_text field from ND IBM dataset
ibm_tweets_file = os.path.join(config.IBM_TWEETS_NEWS_DIR, 'ibm_tweets.json')
# based on ibm_tweets_file. Duplicate tweets with the same or similar tweet_text are removed
ibm_unique_tweets_file = os.path.join(config.IBM_TWEETS_NEWS_DIR, 'ibm_unique_tweets.json')
# manually selected news sources list by examining most common news sources of valid urls embedded in ibm unique tweets
# selected_news_sources_lst = ['www.forbes.com', 'finance.yahoo.com', 'venturebeat.com',
# 'medium.com', 'www.engadget.com', 'alltheinternetofthings.com',
# 'www.zdnet.com', 'www.wsj.com', 'www.cnbc.com']
selected_news_sources_lst = ['venturebeat', 'engadget', 'wsj', 'cnbc']
# manually collected ibm news data
ibm_news_file = os.path.join(config.HR_DIR, 'selected_ibm_news.csv')
# all tweets related to the 'social_capital_ceo_palihapitiya_watson_joke' news by cnbc
palihapitiya_watson_joke_tweets_file = os.path.join(config.HR_DIR, 'palihapitiya_watson_joke_tweets.csv')
# manually tag information of all tweets related to the 'social_capital_ceo_palihapitiya_watson_joke' news by cnbc
palihapitiya_watson_joke_tweets_tag_file = os.path.join(config.HR_DIR, 'palihapitiya_watson_joke_tweets_tag.csv')
%%time
Register
IBM_TWEETS_NEWS_DIR = os.path.join(DATA_DIR, 'ibm_tweets_news')
in config
DB_NAME = 'tweets_ek-2'
COL_NAME = 'tw_nt'
if 0 == 1:
multiprocessing.log_to_stderr(logging.DEBUG)
'''
Use multiprocessing to parse tweet_text field for "ibm" keyword
'''
procedure_name = 'tag_native_tweets_text_ibm'
# set processes number to CPU numbers minus 1
process_num = multiprocessing.cpu_count() - 1
process_file_names_lst = ['{}-{}.json'.format(process_ind, procedure_name)
for process_ind in range(process_num)]
process_files_lst = [os.path.join(config.IBM_TWEETS_NEWS_DIR, process_file_name)
for process_file_name in process_file_names_lst]
jobs = []
for process_ind in range(process_num):
p = multiprocessing.Process(target=multiprocessing_workers.find_keywords_in_tweet_text,
args=(DB_NAME, COL_NAME, process_ind, process_num, process_files_lst[process_ind], ['ibm']),
name='Process-{}/{}'.format(process_ind, process_num))
jobs.append(p)
for job in jobs:
job.start()
for job in jobs:
job.join()
%%time
Merge all process files into a single file
Register
ibm_tweets_file = os.path.join(config.IBM_TWEETS_NEWS_DIR, 'ibm_tweets.json')
in Initialization section.
if 0 == 1:
'''
Re-generate process file names
'''
procedure_name = 'tag_native_tweets_text_ibm'
process_num = multiprocessing.cpu_count() - 1
process_file_names_lst = ['{}-{}.json'.format(process_ind, procedure_name)
for process_ind in range(process_num)]
process_files_lst = [os.path.join(config.IBM_TWEETS_NEWS_DIR, process_file_name)
for process_file_name in process_file_names_lst]
with open(ibm_tweets_file, 'w') as output_f:
for process_file in process_files_lst:
with open(process_file, 'r') as input_f:
for line in input_f:
output_f.write(line)
%%time
Remove tweets with the same or similar tweet_text field
Register
ibm_unique_tweets_file = os.path.join(config.IBM_TWEETS_NEWS_DIR, 'ibm_unique_tweets.json')
in Initialization section.
if 0 == 1:
with open(ibm_unique_tweets_file, 'w') as output_f:
with open(ibm_tweets_file, 'r') as input_f:
unique_tweet_text_field = set()
for line in input_f:
tweet_json = json.loads(line)
tweet_text = tweet_json['text']
cleaned_tweet_text = utilities.clean_tweet_text(tweet_text)
if cleaned_tweet_text not in unique_tweet_text_field:
unique_tweet_text_field.add(cleaned_tweet_text)
output_f.write(line)
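# NOTE: utilities.clean_tweet_text is a project-specific helper not shown here.
# A minimal sketch of what such a normalizer might do (an assumption, not the
# actual implementation):
#
#     def clean_tweet_text(text):
#         import re
#         text = re.sub(r'https?://\S+', '', text.lower())  # drop URLs
#         text = re.sub(r'[^a-z0-9 ]', ' ', text)           # keep alphanumerics only
#         return ' '.join(text.split())                     # collapse whitespace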
Check number of ibm tweets and number of ibm unique tweets
if 1 == 1:
with open(ibm_tweets_file, 'r') as f:
ibm_tweets_num = sum([1 for line in f])
print('Number of ibm tweets: {}'.format(ibm_tweets_num))
with open(ibm_unique_tweets_file, 'r') as f:
ibm_unique_tweets_num = sum([1 for line in f])
print('Number of unique ibm tweets: {}'.format(ibm_unique_tweets_num))
Check number of ibm unique tweets with URL
if 1 == 1:
with open(ibm_unique_tweets_file, 'r') as f:
# if entities.urls field is not empty
ibm_unique_tweets_url_num = sum([1 for line in f
if json.loads(line)['entities']['urls']])
print('Number of unique ibm tweets with URL: {}'.format(ibm_unique_tweets_url_num))
%%time
Check most popular domain names in URLs embedded in ibm unique tweets
if 1 == 1:
url_domain_names_counter = collections.Counter()
with open(ibm_unique_tweets_file, 'r') as f:
for line in f:
tweet_json = json.loads(line)
# if tweet contains at least one url, entities.urls is not empty
entities_urls = tweet_json['entities']['urls']
if entities_urls:
for entities_url in entities_urls:
# expanded_url field may contain full unshortened url
expanded_url = entities_url['expanded_url']
url_domain_name = expanded_url.split('/')[2]
url_domain_names_counter.update([url_domain_name])
pprint(url_domain_names_counter.most_common(50))
%%time
Re-compute most popular domain names in URLs embedded in ibm unique tweets
- ignore misc irrelevant website domain names
- ignore all shortened urls
Register
selected_news_sources_lst
in Initialization section.
misc_irrelevant_websites_lst = ['twitter', 'youtube', 'youtu.be', 'amazon', 'paper.li', 'linkedin', 'lnkd.in', 'instagram']
shortened_url_identifiers_lst = ['bit.ly', 'ift.tt', 'dlvr.it', 'ow.ly', 'buff.ly', 'oal.lu', 'goo.gl', 'ln.is', 'gag.gl', 'fb.me', 'trap.it', 'ibm.co',
'ibm.biz', 'shar.es', 'crwd.fr', 'klou.tt', 'tek.io', 'owler.us', 'upflow.co', 'hubs.ly', 'zd.net', 'spr.ly', 'flip.it']
if 0 == 1:
valid_url_domain_names_counter = collections.Counter()
ignore_lst = misc_irrelevant_websites_lst + shortened_url_identifiers_lst
with open(ibm_unique_tweets_file, 'r') as f:
for line in f:
tweet_json = json.loads(line)
# if tweet contains at least one url, entities.urls is not empty
entities_urls = tweet_json['entities']['urls']
if entities_urls:
for entities_url in entities_urls:
# expanded_url field may contain full unshortened url
expanded_url = entities_url['expanded_url']
# ignore all urls with manually selected tokens
if not any(token in expanded_url for token in ignore_lst):
# ignore all shortened urls by HEURISTIC
if len(expanded_url.split('/')) > 4:
valid_url_domain_name = expanded_url.split('/')[2]
valid_url_domain_names_counter.update([valid_url_domain_name])
pprint(valid_url_domain_names_counter.most_common(50))
%%time
Check most common valid links
misc_irrelevant_websites_lst = ['twitter', 'youtube', 'youtu.be', 'amazon', 'paper.li', 'linkedin', 'lnkd.in', 'instagram']
shortened_url_identifiers_lst = ['bit.ly', 'ift.tt', 'dlvr.it', 'ow.ly', 'buff.ly', 'oal.lu', 'goo.gl', 'ln.is', 'gag.gl', 'fb.me', 'trap.it', 'ibm.co',
'ibm.biz', 'shar.es', 'crwd.fr', 'klou.tt', 'tek.io', 'owler.us', 'upflow.co', 'hubs.ly', 'zd.net', 'spr.ly', 'flip.it']
if 0 == 1:
urls_counter = collections.Counter()
ignore_lst = misc_irrelevant_websites_lst + shortened_url_identifiers_lst
with open(ibm_unique_tweets_file, 'r') as f:
for line in f:
tweet_json = json.loads(line)
# if tweet contains at least one url, entities.urls is not empty
entities_urls = tweet_json['entities']['urls']
if entities_urls:
for entities_url in entities_urls:
# expanded_url field may contain full unshortened url
expanded_url = entities_url['expanded_url']
# ignore all urls with manually selected tokens
if not any(token in expanded_url for token in ignore_lst):
# ignore all shortened urls by HEURISTIC
if len(expanded_url.split('/')) > 4:
urls_counter.update([expanded_url])
pprint(urls_counter.most_common(50))
%%time
Check most common links to selected news sources
if 0 == 1:
selected_news_sources_urls_counter = collections.Counter()
with open(ibm_tweets_file, 'r') as f:
for line in f:
tweet_json = json.loads(line)
# if tweet contains at least one url, entities.urls is not empty
entities_urls = tweet_json['entities']['urls']
if entities_urls:
for entities_url in entities_urls:
# expanded_url field may contain full unshortened url
expanded_url = entities_url['expanded_url']
# filter out only url links to selected news sources
if any(selected_news_source in expanded_url for selected_news_source in selected_news_sources_lst):
selected_news_sources_urls_counter.update([expanded_url])
pprint(selected_news_sources_urls_counter.most_common(50))
Register
ibm_news_file
in Initialization section.
Load in csv file
if 1 == 1:
ibm_news_df = pd.read_csv(filepath_or_buffer=ibm_news_file, sep='\t')
with pd.option_context('display.max_colwidth', 100, 'expand_frame_repr', False):
display(ibm_news_df[['NEWS_DATE', 'NEWS_NAME', 'NEWS_DOC']])
Print any news_doc by paragraphs
test_lst = ibm_news_df.iloc[10]['NEWS_DOC'].split('::::::::')
for ind, item in enumerate(test_lst):
print('({})'.format(ind+1))
print(item)
%%time
Find out all tweets related to the 'social_capital_ceo_palihapitiya_watson_joke' news
News URL 1: https://www.cnbc.com/2017/05/08/ibms-watson-is-a-joke-says-social-capital-ceo-palihapitiya.html
News URL 2: https://www.cnbc.com/2017/05/09/no-joke-id-like-to-see-my-firm-go-head-to-head-with-ibm-on-a-i-palihapitiya.html
Register
palihapitiya_watson_joke_tweets_file
in Initialization section
if 0 == 1:
target_news_keywords_lst = ['social capital', 'chamath', 'palihapitiya']
target_tweets_dict_lst = []
with open(ibm_unique_tweets_file, 'r') as f:
for line in f:
tweet_json = json.loads(line)
tweet_text = tweet_json['text'].replace('\n', ' ').replace('\r', ' ')
tweet_user_screen_name = tweet_json['user']['screen_name']
tweet_created_at = utilities.parse_tweet_post_time(tweet_json['created_at'])
if any(kw.lower() in tweet_text.lower() for kw in target_news_keywords_lst):
target_tweet_dict = {'tweet_created_at': tweet_created_at,
'tweet_user_screen_name': tweet_user_screen_name,
'tweet_text': tweet_text}
target_tweets_dict_lst.append(target_tweet_dict)
target_tweets_df = pd.DataFrame(target_tweets_dict_lst)
target_tweets_df.to_csv(path_or_buf=palihapitiya_watson_joke_tweets_file, sep='\t', index=True, quoting=csv.QUOTE_MINIMAL)
Read in data
if 1 == 1:
target_tweets_df = pd.read_csv(filepath_or_buffer=palihapitiya_watson_joke_tweets_file,
sep='\t',
index_col=0,
parse_dates=['tweet_created_at'],
quoting=csv.QUOTE_MINIMAL)
with pd.option_context('display.max_rows', 260, 'display.max_colwidth', 150, 'expand_frame_repr', False):
display(target_tweets_df)
Register
palihapitiya_watson_joke_tweets_tag_file
in Initialization section
Load data
if 1 == 1:
'''
Read in all tweets related to the 'social_capital_ceo_palihapitiya_watson_joke' news
'''
target_tweets_df = pd.read_csv(filepath_or_buffer=palihapitiya_watson_joke_tweets_file,
sep='\t',
index_col=0,
parse_dates=['tweet_created_at'],
quoting=csv.QUOTE_MINIMAL)
'''
Read in manually tagged information for all tweets just loaded
'''
target_tweets_tag_df = pd.read_csv(filepath_or_buffer=palihapitiya_watson_joke_tweets_tag_file,
sep='\t',
index_col=0)
'''
Combine dfs and set index
'''
test_tweets_df = target_tweets_df.join(target_tweets_tag_df)
test_tweets_df['tweet_index'] = test_tweets_df.index
test_tweets_df = test_tweets_df.set_index('tweet_created_at')
Check tweets related to second news
if 1 == 1:
test_df = test_tweets_df[test_tweets_df['tweet_news'] == 2]
display(test_df)
For tweets related to first news
Build tmp dfs for tweets in mild sentiment and harsh sentiment separately
if 1 == 1:
mild_cond = (test_tweets_df['tweet_news'] == 1) & (test_tweets_df['tweet_sentiment'] == 2)
harsh_cond = (test_tweets_df['tweet_news'] == 1) & (test_tweets_df['tweet_sentiment'] == 3)
mild_tweets_df = test_tweets_df[mild_cond]
harsh_tweets_df = test_tweets_df[harsh_cond]
Check tweets in mild sentiment
print(mild_tweets_df['tweet_index'].count())
with pd.option_context('display.max_rows', 100, 'display.max_colwidth', 150, 'expand_frame_repr', False):
display(mild_tweets_df)
Check tweets in harsh sentiment
print(harsh_tweets_df['tweet_index'].count())
with pd.option_context('display.max_rows', 100, 'display.max_colwidth', 150, 'expand_frame_repr', False):
display(harsh_tweets_df)
Bin mild/harsh tweets by 4H period and count numbers
if 1 == 1:
mild_tweets_bin_count = mild_tweets_df['tweet_index'].resample('4H', convention='start').count().rename('mild_tweets_count')
harsh_tweets_bin_count = harsh_tweets_df['tweet_index'].resample('4H', convention='start').count().rename('harsh_tweets_count')
tweets_count = pd.concat([mild_tweets_bin_count, harsh_tweets_bin_count], axis=1)[:24]
with pd.option_context('display.max_rows', 100, 'display.max_colwidth', 150, 'expand_frame_repr', False):
display(tweets_count)
if 1 == 1:
tweets_count.plot(kind="bar", figsize=(12,6), title='# of mild/harsh tweets', stacked=True)
Prepare df data
if 1 == 1:
'''
Read in all tweets related to the 'social_capital_ceo_palihapitiya_watson_joke' news
'''
target_tweets_df = pd.read_csv(filepath_or_buffer=palihapitiya_watson_joke_tweets_file,
sep='\t',
index_col=0,
parse_dates=['tweet_created_at'],
quoting=csv.QUOTE_MINIMAL)
'''
Read in manually tagged information for all tweets just loaded
'''
target_tweets_tag_df = pd.read_csv(filepath_or_buffer=palihapitiya_watson_joke_tweets_tag_file,
sep='\t',
index_col=0)
'''
Join dfs and set index
'''
test_tweets_df = target_tweets_df.join(target_tweets_tag_df)
test_tweets_df['tweet_index'] = test_tweets_df.index
test_tweets_df = test_tweets_df.set_index('tweet_created_at')
'''
Bin mild/harsh tweets by 4H period and count numbers
'''
mild_tweets_df = test_tweets_df[(test_tweets_df['tweet_news'] == 1) & (test_tweets_df['tweet_sentiment'] == 2)]
harsh_tweets_df = test_tweets_df[(test_tweets_df['tweet_news'] == 1) & (test_tweets_df['tweet_sentiment'] == 3)]
second_news_mild_tweets_df = test_tweets_df[(test_tweets_df['tweet_news'] == 2) & (test_tweets_df['tweet_sentiment'] == 2)]
mild_tweets_bin_count = mild_tweets_df['tweet_index'].resample('4H', label='start', loffset='2H 1S').count().rename('mild_tweets_count')
harsh_tweets_bin_count = harsh_tweets_df['tweet_index'].resample('4H', label='start', loffset='2H 1S').count().rename('harsh_tweets_count')
second_news_mild_tweets_bin_count = second_news_mild_tweets_df['tweet_index'].resample('4H', label='start', loffset='2H 1S').count().rename('second_news_mild_tweets_count')
tweets_count = pd.concat([mild_tweets_bin_count, harsh_tweets_bin_count, second_news_mild_tweets_bin_count], axis=1)
'''
Misc operations
'''
tweets_count = tweets_count.fillna(0)
tweets_count['mild_tweets_count'] = tweets_count['mild_tweets_count'].astype(int)
tweets_count['harsh_mild_diff'] = tweets_count['harsh_tweets_count'] - tweets_count['mild_tweets_count']
tweets_count['mild_tweets_count_neg'] = - tweets_count['mild_tweets_count']
tweets_count['second_news_mild_tweets_count'] = tweets_count['second_news_mild_tweets_count'].astype(int)
tweets_count['second_news_mild_tweets_count_neg'] = - tweets_count['second_news_mild_tweets_count']
tweets_count.reset_index(drop=False, inplace=True)
tweets_r_df = tweets_count
tweets_r_df
%%R -i tweets_r_df
#
# Prepare data
#
# cast data types
tweets_r_df$tweet_created_at <- as.POSIXct(strptime(tweets_r_df$tweet_created_at, format="%Y-%m-%d %H:%M:%S"))
#
# Plot and tweak histogram
#
# initialize new plot
# cols <- c('Harsh'='red', 'Mild'='blue', 'diff_line'='black')
plt <- ggplot(data=tweets_r_df, aes(x=tweet_created_at)) +
# layers of ref lines for publishing times of first and second news
geom_vline(xintercept=as.POSIXct(strptime('2017-05-08 16:45:00', format="%Y-%m-%d %H:%M:%S")), linetype='dashed', color='grey80') +
geom_vline(xintercept=as.POSIXct(strptime('2017-05-09 09:55:00', format="%Y-%m-%d %H:%M:%S")), linetype='dashed', color='grey80') +
# layer of geom_bar for harsh tweets
geom_bar(aes(y=harsh_tweets_count, fill='Harsh'), stat='identity', alpha=0.65) +
# layer of geom_rect for highlighting largest bar
geom_rect(aes(xmin=as.POSIXct(strptime('2017-05-09 12:15:00', format="%Y-%m-%d %H:%M:%S")),
xmax=as.POSIXct(strptime('2017-05-09 15:45:00', format="%Y-%m-%d %H:%M:%S")),
ymin=0, ymax=27), fill=NA, color="red", size=0.7, alpha=1) +
# layer of geom_bar for mild tweets
geom_bar(aes(y=mild_tweets_count_neg, fill='Mild'), stat='identity', alpha=0.65) +
# layer of geom_line for diff between harsh tweets and mild tweets
geom_line(aes(x=(tweet_created_at), y=harsh_mild_diff), stat='identity', linetype='solid') +
# layer of geom_bar for a few tweets related to second news in mild sentiment
geom_bar(aes(y=second_news_mild_tweets_count_neg), stat='identity', alpha=0.65, fill='green') +
# x-axis and y-axis
scale_x_datetime(name = 'Time',
date_labels = "%b %d %I%p",
date_breaks = "4 hour",
expand = c(0, 0),
limits = c(as.POSIXct(strptime('2017-05-08 12:00:00', format="%Y-%m-%d %H:%M:%S")),
as.POSIXct(strptime('2017-05-10 19:00:00', format="%Y-%m-%d %H:%M:%S")))) +
scale_y_continuous(name = 'Number of users',
breaks = c(-10, -5, 0, 5, 10, 15, 20, 25),
labels = c('10', '5', '0', '5', '10', '15', '20', '25'),
limits = c(-15, 30)) +
# legend
scale_fill_manual(name = "Sentiment Intensity",
values = c('Harsh'='red', 'Mild'='blue')) +
# theme
theme(panel.background = element_blank(),
axis.line = element_line(color='black'),
panel.grid.major.y = element_line(color='grey80'),
panel.grid.major.x = element_blank(),
panel.grid.minor = element_blank(),
axis.text.x = element_text(angle=90),
legend.position = 'top')
#
# Output figure
#
ggsave('./fig/ibm_joke_or_not.png', plt, height=5, width=5, dpi=200)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare tweets and news data for IBM topic
Step3: Prepare multiprocessing and MongoDB scripts available in ibm_tweets_analysis project
Step5: Merge process files
Step7: Remove duplicate tweets
Step14: Check basic statistics of URL links embedded in tweet_text that point to external news articles
Step16: Manually collect external news articles
Step19: Check ibm_news basic statistics
Step22: Updated Objective
Step24: Manually tag each tweet
Step31: Check data and quick plot
Step33: Plot using R ggplot2
|
7,387
|
<ASSISTANT_TASK:>
Python Code:
%%time
with open("simhash_sorted.txt") as f:
simhashes = [int(line[:-1]) for line in f.readlines()]
simhashes = np.array(simhashes, dtype=np.uint64) # found out before that simhash fits uint64
SIMHASH_SIZE = 64
num_samples = len(simhashes)
print "Number of samples:", num_samples
print "SimHash example:", format(simhashes[0], "b")
print "SimHash size:", SIMHASH_SIZE
MAX_DISTANCE = 3
NUM_PARTS = MAX_DISTANCE + 1
PART_SIZE = SIMHASH_SIZE / NUM_PARTS
neg_part_mask = "0" * PART_SIZE
pos_part_mask = "1" * PART_SIZE
masks = [neg_part_mask * part_id + pos_part_mask + neg_part_mask * (NUM_PARTS - part_id - 1)\
for part_id in range(NUM_PARTS)]
masks = np.array([int(mask, 2) for mask in masks], dtype=np.uint64)
def get_part(simhash, part_id):
return int(simhash & masks[part_id]) >> (PART_SIZE * (NUM_PARTS - part_id - 1))
%%time
simhashes_parts = np.zeros((len(simhashes), NUM_PARTS), dtype=np.int32)
for simhash_id, simhash in enumerate(simhashes):
for part_id in xrange(NUM_PARTS):
simhashes_parts[simhash_id][part_id] = get_part(simhash, part_id)
%%time
indices = [[list() for __ in xrange(2 ** PART_SIZE)] for _ in xrange(NUM_PARTS)]
for simhash_id in xrange(num_samples):
simhash_parts = simhashes_parts[simhash_id]
for part_id in xrange(NUM_PARTS):
indices[part_id][simhash_parts[part_id]].append(simhash_id)
def ones_positions(num_ones, size=SIMHASH_SIZE):
if num_ones == 0:
yield []
return
for position in range(size):
for positions in ones_positions(num_ones - 1, size):
yield [position] + positions
accepted_xors = set()
for num_ones in xrange(MAX_DISTANCE + 1):
for positions in ones_positions(num_ones):
xor = ["0"] * SIMHASH_SIZE
for pos in positions:
xor[pos] = "1"
accepted_xors.add(np.uint64(int("".join(xor), 2)))
print len(accepted_xors)
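# 43745 = C(64,0) + C(64,1) + C(64,2) + C(64,3) = 1 + 64 + 2016 + 41664, i.e. the
# number of distinct XOR patterns with Hamming weight <= MAX_DISTANCE
# (ones_positions yields reordered/repeated position lists, but the set collapses them)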
def similar(hash1, hash2):
return (hash1 ^ hash2) in accepted_xors
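# Quick sanity check (illustrative): distance 1 is similar, distance 4 is not
print similar(np.uint64(0b1011), np.uint64(0b1001))  # True  (1 differing bit)
print similar(np.uint64(0b1111), np.uint64(0b0000))  # False (4 differing bits)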
%%time
groups_sizes = []
assigned = [False] * num_samples # indicators of simhashes assigned to any of the considered groups
num_assigned = 0
start = time()
for simhash_id, simhash in enumerate(simhashes):
if assigned[simhash_id]:
continue
group_size = 0
simhash_parts = simhashes_parts[simhash_id]
for part_id, part in enumerate(simhash_parts):
for candidate_id in indices[part_id][part]:
if assigned[candidate_id]:
continue
if similar(simhash, simhashes[candidate_id]):
group_size += 1
assigned[candidate_id] = True
num_assigned += 1
groups_sizes.append(group_size)
if simhash_id % 3000 == 0:
spent = time() - start
clear_output()
print "assigned: {}\tRemained time: {:.2f} days".format(
num_assigned,(float(num_samples) / num_assigned - 1) * spent / 60 / 60 / 24)
groups_sizes = np.array(groups_sizes)
plt.figure(figsize=(12,8))
plt.plot(groups_sizes);
plt.xlabel("Group ID")
plt.ylabel("Group size");
plt.figure(figsize=(12,8))
plt.hist(groups_sizes, bins=100, log=True)
plt.xlabel("Group size")
plt.ylabel("Number of groups");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Split the simhashes into 4 parts for indexing.
Step2: Build the indices.
Step3: Cluster the hashes.
Step4: After running for 6 hours, the script projected roughly 9 more days of work, which is too long. We therefore analyze the data collected so far as a representative sample.
Step5: As we can see, group size shows no trend over time.
|
7,388
|
<ASSISTANT_TASK:>
Python Code:
import findspark
findspark.init()
import pyspark
import numpy as np
conf = pyspark.SparkConf().\
setAppName('sentiment-analysis').\
setMaster('local[*]')
from pyspark.sql import SQLContext, HiveContext
sc = pyspark.SparkContext(conf=conf)
sqlContext = HiveContext(sc)
# dataframe functions
from pyspark.sql import functions as fn
# Create a RDDs
documents_rdd = sc.parallelize([
[1, 'cats are cute', 0],
[2, 'dogs are playfull', 0],
[3, 'lions are big', 1],
[4, 'cars are fast', 1]])
users_rdd = sc.parallelize([
[0, 'Alice', 20],
[1, 'Bob', 23],
[2, 'Charles', 32]])
documents_df = documents_rdd.toDF(['doc_id', 'text', 'user_id'])
users_df = users_rdd.toDF(['user_id', 'name', 'age'])
documents_df.printSchema()
users_df.printSchema()
from pyspark.sql import functions as fn
# compute the average age of users
user_age_df = users_df.select(fn.avg('age'))
user_age_df
user_age_df.show()
users_df.join(documents_df, on='user_id').show()
users_df.join(documents_df, on='user_id', how='left').show()
users_df.join(documents_df, 'user_id', how='left').\
groupby('user_id', 'name').\
agg(fn.count('text')).\
show()
users_df.join(documents_df, 'user_id', how='left').\
groupby('user_id', 'name').\
agg(fn.count('text').alias('n_pets')).\
show()
users_df.withColumn('name_length', fn.length('name')).show()
from pyspark.ml.feature import Tokenizer
# the tokenizer object
tokenizer = Tokenizer().setInputCol('text').setOutputCol('words')
tokenizer.transform(documents_df).show()
from pyspark.ml.feature import CountVectorizer
count_vectorizer_estimator = CountVectorizer().setInputCol('words').setOutputCol('features')
count_vectorizer_transformer = count_vectorizer_estimator.fit(tokenizer.transform(documents_df))
count_vectorizer_transformer.transform(tokenizer.transform(documents_df)).show(truncate=False)
# list of words in the vocabulary
count_vectorizer_transformer.vocabulary
np.array(count_vectorizer_transformer.vocabulary)[[0, 3, 5]]
from pyspark.ml import Pipeline
pipeline_cv_estimator = Pipeline(stages=[tokenizer, count_vectorizer_estimator])
pipeline_cv_transformer = pipeline_cv_estimator.fit(documents_df)
pipeline_cv_transformer.transform(documents_df).show()
!wget https://github.com/daniel-acuna/python_data_science_intro/blob/master/data/imdb_reviews_preprocessed.parquet.zip?raw=true -O imdb_reviews_preprocessed.parquet.zip && unzip imdb_reviews_preprocessed.parquet.zip && rm imdb_reviews_preprocessed.parquet.zip
!wget https://github.com/daniel-acuna/python_data_science_intro/blob/master/data/sentiments.parquet.zip?raw=true -O sentiments.parquet.zip && unzip sentiments.parquet.zip && rm sentiments.parquet.zip
!wget https://github.com/daniel-acuna/python_data_science_intro/blob/master/data/tweets.parquet.zip?raw=true -O tweets.parquet.zip && unzip tweets.parquet.zip && rm tweets.parquet.zip
sentiments_df = sqlContext.read.parquet('sentiments.parquet')
sentiments_df.printSchema()
# a sample of positive words
sentiments_df.where(fn.col('sentiment') == 1).show(5)
# a sample of negative words
sentiments_df.where(fn.col('sentiment') == -1).show(5)
sentiments_df.groupBy('sentiment').agg(fn.count('*')).show()
imdb_reviews_df = sqlContext.read.parquet('imdb_reviews_preprocessed.parquet')
imdb_reviews_df.where(fn.col('score') == 1).first()
imdb_reviews_df.where(fn.col('score') == 0).first()
from pyspark.ml.feature import RegexTokenizer
tokenizer = RegexTokenizer().setGaps(False)\
.setPattern("\\p{L}+")\
.setInputCol("review")\
.setOutputCol("words")
review_words_df = tokenizer.transform(imdb_reviews_df)
print(review_words_df)
review_words_df.show(5)
review_words_df.select('id', fn.explode('words').alias('word')).show(5)
review_word_sentiment_df = review_words_df.\
select('id', fn.explode('words').alias('word')).\
join(sentiments_df, 'word')
review_word_sentiment_df.show(5)
simple_sentiment_prediction_df = review_word_sentiment_df.\
groupBy('id').\
agg(fn.avg('sentiment').alias('avg_sentiment')).\
withColumn('predicted', fn.when(fn.col('avg_sentiment') > 0, 1.0).otherwise(0.))
simple_sentiment_prediction_df.show(5)
imdb_reviews_df.\
join(simple_sentiment_prediction_df, 'id').\
select(fn.expr('float(score = predicted)').alias('correct')).\
select(fn.avg('correct')).\
show()
# we obtain the stop words from a website
import requests
stop_words = requests.get('http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words').text.split()
stop_words[0:10]
from pyspark.ml.feature import StopWordsRemover
sw_filter = StopWordsRemover()\
.setStopWords(stop_words)\
.setCaseSensitive(False)\
.setInputCol("words")\
.setOutputCol("filtered")
from pyspark.ml.feature import CountVectorizer
# we will remove words that appear in 5 docs or less
cv = CountVectorizer(minTF=1., minDF=5., vocabSize=2**17)\
.setInputCol("filtered")\
.setOutputCol("tf")
# we now create a pipelined transformer
cv_pipeline = Pipeline(stages=[tokenizer, sw_filter, cv]).fit(imdb_reviews_df)
# now we can make the transformation between the raw text and the counts
cv_pipeline.transform(imdb_reviews_df).show(5)
from pyspark.ml.feature import IDF
idf = IDF().\
setInputCol('tf').\
setOutputCol('tfidf')
idf_pipeline = Pipeline(stages=[cv_pipeline, idf]).fit(imdb_reviews_df)
idf_pipeline.transform(imdb_reviews_df).show(5)
tfidf_df = idf_pipeline.transform(imdb_reviews_df)
training_df, validation_df, testing_df = imdb_reviews_df.randomSplit([0.6, 0.3, 0.1], seed=0)
[training_df.count(), validation_df.count(), testing_df.count()]
from pyspark.ml.classification import LogisticRegression
lr = LogisticRegression().\
setLabelCol('score').\
setFeaturesCol('tfidf').\
setRegParam(0.0).\
setMaxIter(100).\
setElasticNetParam(0.)
lr_pipeline = Pipeline(stages=[idf_pipeline, lr]).fit(training_df)
lr_pipeline.transform(validation_df).\
select(fn.expr('float(prediction = score)').alias('correct')).\
select(fn.avg('correct')).show()
import pandas as pd
vocabulary = idf_pipeline.stages[0].stages[-1].vocabulary
weights = lr_pipeline.stages[-1].coefficients.toArray()
coeffs_df = pd.DataFrame({'word': vocabulary, 'weight': weights})
coeffs_df.sort_values('weight').head(5)
coeffs_df.sort_values('weight', ascending=False).head(5)
idf_pipeline.transform(training_df).\
select('id', fn.explode('words').alias('word')).\
where(fn.col('word') == 'helming').\
join(training_df, 'id').\
first()
lambda_par = 0.02
alpha_par = 0.3
en_lr = LogisticRegression().\
setLabelCol('score').\
setFeaturesCol('tfidf').\
setRegParam(lambda_par).\
setMaxIter(100).\
setElasticNetParam(alpha_par)
en_lr_pipeline = Pipeline(stages=[idf_pipeline, en_lr]).fit(training_df)
en_lr_pipeline.transform(validation_df).select(fn.avg(fn.expr('float(prediction = score)'))).show()
en_weights = en_lr_pipeline.stages[-1].coefficients.toArray()
en_coeffs_df = pd.DataFrame({'word': vocabulary, 'weight': en_weights})
en_coeffs_df.sort_values('weight').head(15)
en_coeffs_df.sort_values('weight', ascending=False).head(15)
en_coeffs_df.query('weight == 0.0').shape
en_coeffs_df.query('weight == 0.0').shape[0]/en_coeffs_df.shape[0]
en_coeffs_df.query('weight == 0.0').head(15)
from pyspark.ml.tuning import ParamGridBuilder
en_lr_estimator = Pipeline(stages=[idf_pipeline, en_lr])
grid = ParamGridBuilder().\
addGrid(en_lr.regParam, [0., 0.01, 0.02]).\
addGrid(en_lr.elasticNetParam, [0., 0.2, 0.4]).\
build()
grid
all_models = []
for j in range(len(grid)):
print("Fitting model {}".format(j+1))
model = en_lr_estimator.fit(training_df, grid[j])
all_models.append(model)
# estimate the accuracy of each of them:
accuracies = [m.\
transform(validation_df).\
select(fn.avg(fn.expr('float(score = prediction)')).alias('accuracy')).\
first().\
accuracy for m in all_models]
import numpy as np
best_model_idx = np.argmax(accuracies)
grid[best_model_idx]
best_model = all_models[best_model_idx]
accuracies[best_model_idx]
tweets_df = sqlContext.read.parquet('tweets.parquet')
tweets_df.show(5, truncate=False)
tweets_df.groupby('handle').agg(fn.count('*')).show()
best_model.transform(tweets_df.withColumnRenamed('text', 'review')).select('review', 'prediction').show()
%matplotlib inline
import seaborn
sentiment_pd = best_model.\
transform(tweets_df.withColumnRenamed('text', 'review')).\
groupby('handle').\
agg(fn.avg('prediction').alias('prediction'),
(2*fn.stddev('prediction')/fn.sqrt(fn.count('*'))).alias('err')).\
toPandas()
sentiment_pd.head()
sentiment_pd.plot(x='handle', y='prediction', xerr='err', kind='barh');
best_model.\
transform(tweets_df.withColumnRenamed('text', 'review')).\
where(fn.col('handle') == '@realDonaldTrump').\
where(fn.col('prediction') == 0).\
select('review').\
take(5)
best_model.\
transform(tweets_df.withColumnRenamed('text', 'review')).\
where(fn.col('handle') == '@HillaryClinton').\
where(fn.col('prediction') == 0).\
select('review').\
take(5)
from pyspark.sql import types
def probability_positive(probability_column):
return float(probability_column[1])
func_probability_positive = fn.udf(probability_positive, types.DoubleType())
prediction_probability_df = best_model.transform(validation_df).\
withColumn('probability_positive', func_probability_positive('probability')).\
select('id', 'review', 'score', 'probability_positive')
prediction_probability_df.show()
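# (Illustrative follow-up) With a per-row positive-class probability we can also
# score the model by AUC; BinaryClassificationEvaluator accepts a double-typed
# score column as rawPredictionCol. Assumes 'score' is a numeric 0/1 label.
from pyspark.ml.evaluation import BinaryClassificationEvaluator
evaluator = BinaryClassificationEvaluator(rawPredictionCol='probability_positive', labelCol='score')
print(evaluator.evaluate(prediction_probability_df))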
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction to dataframes
Step2: From the previous RDDs, we can call the toDF method and specify the name of columns
Step3: Spark will automatically try to guess the column types. We can take a look at those types
Step4: Similar to SQL, we can apply a function to a column or several columns.
Step5: As you can see, the function is not evaluated until an action (e.g., take, show, collect) is taken
Step6: We can cross (e.g., join) two dataframes ala SQL
Step7: We can also do outer joins
Step8: We can apply group functions
Step9: We can change the name of computed columns
Step10: Add columns
Step11: There are many, many types of functions. E.g., see here
Step12: Almost all transformers and estimators require you to specify the input column of the dataframe and the output column that will be added to the dataframe.
Step13: We can now transform the dataframe
Step14: CountVectorizer
Step15: A CountVectorizer is different from a Tokenizer because it needs to learn how many different tokens there are in the input column. With that number, it will output vectors with consistent dimensions. Therefore, CountVectorizer is an Estimator that, when fitted, returns a Transformer.
Step16: Now we need to user the words column that generated by the tokenizer transformer
Step17: which results in
Step18: The column features is a sparse vector representation. For example, for the first document, we have three features present
Step19: Pipelines
Step20: In more complex scenarios, you can even chain Pipeline transformers. We will see this case in the actual use case below.
Step21: Load sentiment data
Step22: The schema is very simple
Step23: Lets see how many of each category we have
Step24: We have almost two times the number of negative words!
Step25: Let's take a look at a positive review
Step26: And a negative one
Step27: The first problem that we encounter is that the reviews are in plain text. We need to split the words and then match them to sentiment_df.
Step28: RegexTokenizer extracts a sequence of matches from the input text. Regular expressions are a powerful tool to extract strings with certain characteristics.
Step29: The pattern \p{L}+ means that it will extract letters without accents (e.g., it will extract "Acuna" from "Acuña"). setGaps means that it will keep applying the rule until it can't extract new words. You have to set the input column from the incoming dataframe (in our case the review column) and the new column that will be added (e.g., words).
Step30: Applying the transformation doesn't actually do anything until you apply an action. But as you can see, a new column words of type array of string was added by the transformation. We can see how it looks
Step31: Now, we want to match every word from sentiment_df in the array words shown before. One way of doing this is to explode the column words to create a row for each element in that list. Then, we would join that result with the dataframe sentiment to continue further.
Step32: Now if we join that with sentiment, we can see if there are positive and negative words in each review
Step33: Now we can simply average the sentiment per review id and, say, pick positive when the average is above 0, and negative otherwise.
Step34: Now, lets compute the accuracy of our prediction
Step35: Not bad with such a simple approach! But can we do better than this?
Step36: Finally, for this initial Pipeline, we define a count vectorizer estimator
Step37: The term frequency vector is represented with a sparse vector. We have 26,384 terms.
Step38: Therefore, the idf_pipeline takes the raw text from the dataframe imdb_reviews_df and creates a feature vector called tfidf! (The tf-idf formula is sketched just after this list.)
Step39: Data science pipeline for estimating sentiments
Step40: One immediately apparent problem is that the number of features in the dataset is far larger than the number of training examples. This can lead to serious overfitting.
Step41: Lets create a pipeline transformation by chaining the idf_pipeline with the logistic regression step (lr)
Step42: Lets estimate the accuracy
Step43: The performance is much better than before.
Step44: The most negative words are
Step45: And the most positive
Step46: But none of them make sense. What is happening? We are overfitting the data. Those words that don't make sense are capturing just noise in the reviews.
Step47: Regularization
Step48: And we define a new Pipeline
Step49: Let's look at the performance
Step50: We improve performance slightly, but what's more important is that we improve the understanding of the word sentiments. Let's take a look at the weights
Step51: The most negative words all make sense ("worst" is actually more negative than "worse")!
Step52: Same thing with positive words
Step53: Are there words with literally zero importance for predicting sentiment? Yes, and in fact most of them are!
Step54: In fact, more than 95% of features are not needed to achieve a better performance than all previous models!
Step55: Let's look at these neutral words
Step56: But, did we choose the right $\lambda$ and $\alpha$ parameters? We should run an experiment where we try different combinations of them. Fortunately, Spark lets us do this by using a grid, a method that generates combinations of parameters.
Step57: We need to build a new estimator pipeline
Step58: This is the list of parameters that we will try
Step59: So the best model we found has the following parameters
Step60: Finally, predicting tweet sentiments
Step61: We have 1K tweets from each candidate
Step62: We can now predict the sentiment of the tweets using our best model; we need to rename the column so that it matches our previous pipeline (review => ...)
Step63: Now, lets summarize our results in a graph!
Step64: But let's examine some "negative" tweets by Trump
Step65: And Clinton
Step66: As you can see, there is plenty of room for improvement.
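For reference (Step38), Spark's IDF uses the standard smoothed formulation (stated here for clarity, not quoted from the notebook):
$$\mathrm{tfidf}(t, d) = \mathrm{tf}(t, d) \cdot \log\frac{N + 1}{\mathrm{df}(t) + 1}$$
where $N$ is the number of documents and $\mathrm{df}(t)$ is the number of documents containing term $t$.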
|
7,389
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
n = 20
sigma = 1.0
xdata = np.linspace(-2, 2, n)
fdata = 3*xdata**2 + 2*xdata + 1 + np.random.randn(n)*sigma
plt.figure()
plt.plot(xdata, fdata, 'o')
plt.xlabel('x')
plt.ylabel('f')
plt.show()
Psi = np.zeros((n, 3))
Psi[:, 0] = xdata**2
Psi[:, 1] = xdata
Psi[:, 2] = 1
w, residuals, rank, s = np.linalg.lstsq(Psi, fdata)
print w
nfull = 200
xfull = np.linspace(-2, 2, nfull)
Psifull = np.zeros((nfull, 3))
Psifull[:, 0] = xfull**2
Psifull[:, 1] = xfull
Psifull[:, 2] = 1
ffull = np.dot(Psifull, w)
plt.figure()
plt.plot(xdata, fdata, 'o')
plt.plot(xfull, ffull, '--')
plt.xlabel('x')
plt.ylabel('f')
plt.show()
def func(x):
sigma = 1.0
return (6*x-2)**2*np.sin(12*x-4) + np.random.randn(len(x))*sigma
# ---- create training data ---------
ndata = 20
xdata = np.linspace(0, 1, ndata)
fdata = func(xdata)
plt.plot(xdata, fdata, 'o')
plt.xlabel('x')
plt.ylabel('f')
plt.show()
def getPsi(x, order):
n = len(x)
Psi = np.zeros((n, order+1))
for i in range(order+1):
Psi[:, i] = x**(order-i)
return Psi
def createpoly(x, f, order):
Psi = getPsi(x, order)
w, residuals, rank, s = np.linalg.lstsq(Psi, f)
return w
def evalpoly(x, w):
order = len(w) - 1
Psi = getPsi(x, order)
f = np.dot(Psi, w)
return f
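# Cross-check (illustrative): np.polyfit performs the same Vandermonde
# least-squares fit, with coefficients in the same highest-power-first order,
# so for a low-order fit the two should agree to numerical precision
print np.allclose(createpoly(xdata, fdata, 2), np.polyfit(xdata, fdata, 2))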
ordervec = np.arange(1, 21)
error = np.zeros(20)
for idx, order in enumerate(ordervec):
# build a polynomial model from the training data
w = createpoly(xdata, fdata, order)
# test the error
fhat = evalpoly(xdata, w)
error[idx] = np.linalg.norm(fhat - fdata)
# plot error
plt.figure()
plt.plot(ordervec, error, 'o')
plt.xlabel('order of polynomial')
plt.ylabel('error')
plt.show()
order = 20
w = createpoly(xdata, fdata, order)
nhat = 200
xhat = np.linspace(0, 1, nhat)
fhat = evalpoly(xhat, w)
plt.figure()
plt.plot(xdata, fdata, 'o')
plt.plot(xhat, fhat, '--')
plt.ylim([-10, 20])
plt.xlabel('x')
plt.ylabel('f')
div = 10 # we will divide our data into div segments
ndata = 20 # number of data points
arrlength = ndata/div # each segment should contain this much data
idxrand = np.random.permutation(ndata) # random permutation of indices 0 ... ndata-1
error = np.zeros(len(ordervec))
# iterate through polynomial orders
for i, order in enumerate(ordervec):
# iterate through divisions of data for k-holdout
for j in range(div):
# indicies of data to leave out from the random permutations
holdout = idxrand[arrlength*j:arrlength*(j+1)]
# separaet into training set and testing set
xtrain = np.delete(xdata, holdout)
ftrain = np.delete(fdata, holdout)
xtest = xdata[holdout]
ftest = fdata[holdout]
# build a polynomial model from the training data
w = createpoly(xtrain, ftrain, order)
# test the error with the validation set
fhat = evalpoly(xtest, w)
error[i] += np.linalg.norm(fhat - ftest) / div # average error across divisions
# plot error
plt.figure()
plt.plot(ordervec, error, 'o')
plt.xlabel('order of polynomial')
plt.ylabel('error')
plt.show()
# plot error
plt.figure()
plt.plot(ordervec, error, 'o')
plt.xlabel('order of polynomial')
plt.ylabel('error')
plt.ylim([0, 25])
plt.show()
order = 4
w = createpoly(xdata, fdata, order)
nhat = 200
xhat = np.linspace(0, 1, nhat)
fhat = evalpoly(xhat, w)
plt.figure()
plt.plot(xdata, fdata, 'o')
plt.plot(xhat, fhat, '--')
plt.ylim([-10, 20])
plt.xlabel('x')
plt.ylabel('f')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This data is our training data and consists of (x, f) pairs
Step2: Ideally w would be [3, 2, 1] based on our underlying polynomial, but it won't recover that exactly because of the noise in the function.
Step3: Cross Validation
Step4: Visually, it's harder to determine what the best order for the polynomial is. Of course, in higher dimensions we cannot visualize the function and so generally won't know beforehand what terms to use in our polynomial model. We will use cross validation to help us choose an appropriate order. Below, we create a function to create and evaluate a polynomial for any order (in a 1D space)
Step5: Let's try different polynomial orders and check to see what the error is in our fit.
Step6: This suggests that the higher the order of the polynomial the better! Of course we know that's not true. Let's look at what the polynomial model looks like at order 20
Step7: We notice that the error at the points we are trying to fit is very small (which is what our least squares solution is doing), but the predictive capability of the model is very poor. The reason for this issue is that we tested our model using the same points we used to train our model, so of course the error was low. What we need to do instead is keep a separate set of training data and a separate set of validation data to test how good the model is. There are many methods to do this. In this example, we use k-holdout cross validation.
Step8: Both plots are the same, but the axis is smaller on the bottom one because the error blows up quickly. Notice that now, instead of the error continually decreasing, it decreases for a while then increases as we run into problems with overfitting. Generally, we like to choose the simplest model that gives reasonable error. The curve is pretty flat near the minimum, and in this case a good point is somewhere around a 4th order polynomial.
|
7,390
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
import pymc3 as pm
import seaborn as sns
SEED = 383561
np.random.seed(SEED) # from random.org, for reproducibility
N = 1000
W = np.array([0.35, 0.4, 0.25])
MU = np.array([0., 2., 5.])
SIGMA = np.array([0.5, 0.5, 1.])
component = np.random.choice(MU.size, size=N, p=W)
x = np.random.normal(MU[component], SIGMA[component], size=N)
fig, ax = plt.subplots(figsize=(8, 6))
ax.hist(x, bins=30, normed=True, lw=0);
with pm.Model() as model:
w = pm.Dirichlet('w', np.ones_like(W))
mu = pm.Normal('mu', 0., 10., shape=W.size)
tau = pm.Gamma('tau', 1., 1., shape=W.size)
x_obs = pm.NormalMixture('x_obs', w, mu, tau=tau, observed=x)
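# NormalMixture marginalizes out the latent component indicator z; equivalently
# (standard latent-variable form, shown for reference):
#     z | w ~ Categorical(w),    x | z ~ Normal(mu[z], tau[z])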
with model:
step = pm.Metropolis()
trace_ = pm.sample(20000, step, random_seed=SEED)
trace = trace_[10000::10]
pm.traceplot(trace, varnames=['w', 'mu']);
with model:
ppc_trace = pm.sample_ppc(trace, 5000, random_seed=SEED)
fig, ax = plt.subplots(figsize=(8, 6))
ax.hist(x, bins=30, normed=True,
histtype='step', lw=2,
label='Observed data');
ax.hist(ppc_trace['x_obs'], bins=30, normed=True,
histtype='step', lw=2,
label='Posterior predictive distribution');
ax.legend(loc=1);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Gaussian mixtures are a flexible class of models for data that exhibits subpopulation heterogeneity. A toy example of such a data set is shown below.
Step2: A natural parameterization of the Gaussian mixture model is as the latent variable model
Step3: We see in the following plot that the posterior distribution on the weights and the component means has captured the true values quite well.
Step4: We can also sample from the model's posterior predictive distribution, as follows.
Step5: We see that the posterior predictive samples have a distribution quite close to that of the observed data.
|
7,391
|
<ASSISTANT_TASK:>
Python Code:
def string_to_kmers(s, length):
return [s[i:i+length] for i in range(len(s)-length+1)]
def minimizer(k, l):
Given k-mer, return its minimal l-mer
assert l <= len(k)
return min(string_to_kmers(k, l))
minimizer('ABC', 2)
minimizer('abracadabra', 4)
minimizer('abracadabr', 4), minimizer('bracadabra', 4)
# you might need to 'pip install mmh3' first
import mmh3
mmh3.hash('abracadabr') % 256, mmh3.hash('bracadabra') % 256
import random
random.seed(629)
def random_kmer(k):
return ''.join([random.choice('ACGT') for _ in range(k)])
%matplotlib inline
import matplotlib.pyplot as plt
def plot_counts(counter, title=None):
idx = range(256)
cnts = list(map(lambda x: counter.get(x, 0), idx))
plt.bar(idx, cnts, ec='none')
plt.xlim(0, 256)
plt.ylim(0, 35)
if title is not None:
plt.title(title)
plt.show()
from collections import Counter
# hash 1000 random 10-mers
cnt = Counter([mmh3.hash(s) % 256 for s in [random_kmer(10) for _ in range(1000)]])
plot_counts(cnt, 'Frequency of partitions using hash mod 256')
def lmer_to_int(mer):
"""Maps AAAA to 0, AAAC to 1, etc. Works for any length argument."""
cum = 0
charmap = {'A':0, 'C':1, 'G':2, 'T':3}
for c in mer:
cum *= 4
cum += charmap[c]
return cum
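# e.g. lmer_to_int('AAAA') == 0 and lmer_to_int('TTTT') == 255 for l = 4
lmer_to_int('AAAA'), lmer_to_int('TTTT')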
# get minimal 4-mers from 1000 random 10-mers
cnt = Counter([lmer_to_int(minimizer(s, 4)) for s in [random_kmer(10) for _ in range(1000)]])
plot_counts(cnt, 'Frequency of partitions using minimal 4-mer; AAAA at left, TTTT at right')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Minimizers
Step2: But if our goal is to partition the space of $k$-mers, couldn't we use a hash function instead? Say $k$ is 10 and $l$ is 4. A 10,4-minimizing scheme is a way of dividing the space of $4^{10}$ 10-mers (a million or so) into $4^4 = 256$ partitions. We can accomplish this with a hash function that maps $k$-mers to integers in $[0, 255]$. Why would we prefer minimizers over hash functions?
Step3: But their hash values (modulo 256) are not the same
Step5: Partition size distribution
|
7,392
|
<ASSISTANT_TASK:>
Python Code:
import sys,os
%matplotlib inline
ia898path = os.path.abspath('/etc/jupyterhub/ia898_1s2017/')
if ia898path not in sys.path:
sys.path.append(ia898path)
import ia898.src as ia
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from numpy.fft import fft2
from numpy.fft import ifft2
def iadftmatrix(N):
x = np.arange(N).reshape(N,1)
u = x
Wn = np.exp(-1j*2*np.pi/N)
A = (1./np.sqrt(N)) * (Wn ** np.dot(u, x.T))
return A
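# Sanity check (illustrative): A is unitary, so A . conj(A).T is the identity
A8 = iadftmatrix(8)
print(np.allclose(A8.dot(np.conjugate(A8.T)), np.eye(8)))  # True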
A = iadftmatrix(128)
plt.figure(1)
plt.imshow((A.real),cmap='gray')
plt.title('A.real')
plt.colorbar()
plt.figure(2)
plt.imshow((A.imag),cmap='gray')
plt.title('A.imag')
plt.colorbar()
plt.figure(1, figsize=(10,10))
plt.subplot(2,2,1)
plt.title('u = 0')
plt.plot(A.real[0,:], 'r')
plt.plot(A.imag[0,:], 'g')
plt.subplot(2,2,2)
plt.title('u = 1')
plt.plot(A.real[1,:], 'r')
plt.plot(A.imag[1,:], 'g')
plt.subplot(2,2,3)
plt.title('u = 2')
plt.plot(A.real[2,:], 'r')
plt.plot(A.imag[2,:], 'g')
plt.subplot(2,2,4)
plt.title('u = 3')
plt.plot(A.real[3,:], 'r')
plt.plot(A.imag[3,:], 'g')
plt.show()
plt.figure(1, figsize=(10,10))
plt.subplot(2,2,1)
plt.title('u = 0')
plt.plot(A.real[ 0,:], 'r')
plt.plot(A.imag[ 0,:], 'g')
plt.subplot(2,2,2)
plt.title('u = -1')
plt.plot(A.real[-1,:], 'r')
plt.plot(A.imag[-1,:], 'g')
plt.subplot(2,2,3)
plt.title('u = -2')
plt.plot(A.real[-2,:], 'r')
plt.plot(A.imag[-2,:], 'g')
plt.subplot(2,2,4)
plt.title('u = -3')
plt.plot(A.real[-3,:], 'r')
plt.plot(A.imag[-3,:], 'g')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Função iadftmatrix
Step2: Kernel images generated
Step3: Four first lines
Step4: Showing complex conjugates
|
7,393
|
<ASSISTANT_TASK:>
Python Code:
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading extenrnal modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
from cs231n.features import color_histogram_hsv, hog_feature
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
return X_train, y_train, X_val, y_val, X_test, y_test
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
from cs231n.features import *
num_color_bins = 10 # Number of bins in the color histogram
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X_train, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)
# Preprocessing: Subtract the mean feature
mean_feat = np.mean(X_train_feats, axis=0, keepdims=True)
X_train_feats -= mean_feat
X_val_feats -= mean_feat
X_test_feats -= mean_feat
# Preprocessing: Divide by standard deviation. This ensures that each feature
# has roughly the same scale.
std_feat = np.std(X_train_feats, axis=0, keepdims=True)
X_train_feats /= std_feat
X_val_feats /= std_feat
X_test_feats /= std_feat
# Preprocessing: Add a bias dimension
X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])
X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])
X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])
# Use the validation set to tune the learning rate and regularization strength
from cs231n.classifiers.linear_classifier import LinearSVM
learning_rates = [-9, -7]
regularization_strengths = [5, 7]
results = {}
best_val = -1
best_svm = None
for _ in np.arange(50):
i = 10 ** np.random.uniform(low=learning_rates[0], high=learning_rates[1])
j = 10 ** np.random.uniform(low=regularization_strengths[0], high=regularization_strengths[1])
svm = LinearSVM()
svm.train(X_train_feats, y_train, learning_rate=i, reg=j,
num_iters=500, verbose=False)
y_train_pred = svm.predict(X_train_feats)
y_val_pred = svm.predict(X_val_feats)
accuracy = (np.mean(y_train == y_train_pred), np.mean(y_val == y_val_pred))
results[(i, j)] = accuracy
if accuracy[1] > best_val:
best_val = accuracy[1]
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy)
print 'best validation accuracy achieved during cross-validation: %f' % best_val
# Get the best hyperparameter from result
best_lr = 0.0
best_reg = 0.0
for lr, reg in results:
if results[(lr, reg)][1] == best_val:
best_lr = lr
best_reg = reg
break
print 'Best learning rate: %f, best regularisation strength: %f' % (best_lr, best_reg, )
# Train the classifier with the best hyperparameters
best_svm = LinearSVM()
loss_hist = best_svm.train(X_train_feats, y_train, learning_rate=best_lr, reg=best_reg,
num_iters=2000, verbose=True)
# plot the loss as a function of iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# Evaluate your trained SVM on the test set
y_test_pred = best_svm.predict(X_test_feats)
test_accuracy = np.mean(y_test == y_test_pred)
print test_accuracy
# An important way to gain intuition about how an algorithm works is to
# visualize the mistakes that it makes. In this visualization, we show examples
# of images that are misclassified by our current system. The first column
# shows images that our system labeled as "plane" but whose true label is
# something other than "plane".
examples_per_class = 8
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for cls, cls_name in enumerate(classes):
idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]
idxs = np.random.choice(idxs, examples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)
plt.imshow(X_test[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls_name)
plt.show()
print X_train_feats.shape
from cs231n.classifiers.neural_net import TwoLayerNet
input_dim = X_train_feats.shape[1]
hidden_dim = 200
num_classes = 10
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None
learning = [1e-5, 1]
regularization = [1e0, 1e4]
decay = [0.9, 1]
results = {}
best_val = -1
for _ in np.arange(0, 50):
i = np.random.uniform(low=learning[0], high=learning[1])
j = np.random.uniform(low=regularization[0], high=regularization[1])
k = np.random.uniform(low=decay[0], high=decay[1])
# Train the network
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
stats = net.train(X_train_feats, y_train, X_val_feats, y_val,
num_iters=500, batch_size=200,
learning_rate=i, learning_rate_decay=k,
reg=j, verbose=False)
# Predict on the validation set
val_acc = (net.predict(X_val_feats) == y_val).mean()
results[(i, j, k)] = val_acc
if val_acc > best_val:
best_val = val_acc
best_net = net
for i, j, k in results:
print 'lr: %f, reg: %f, dec: %f -> %f' % (i, j, k, results[(i, j, k)])
print best_val
# Find the best learning rate and regularization strength
best_lr = 0.
best_reg = 0.
best_decay = 0.
for lr, reg, dec in sorted(results):
if results[(lr, reg, dec)] == best_val:
best_lr = lr
best_reg = reg
best_decay = dec
break
print best_lr, best_decay, best_reg
stats = best_net.train(X_train_feats, y_train, X_val_feats, y_val,
num_iters=2000, batch_size=400,
learning_rate=best_lr, learning_rate_decay=best_decay,
reg=best_reg, verbose=True)
# Run your neural net classifier on the test set. You should be able to
# get more than 55% accuracy.
test_acc = (best_net.predict(X_test_feats) == y_test).mean()
print test_acc
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Extract Features
Step3: Train SVM on features
Step4: Inline question 1
|
7,394
|
<ASSISTANT_TASK:>
Python Code:
import sqlalchemy
print(sqlalchemy.__version__)
from sqlalchemy import create_engine
engine = create_engine('sqlite:///users_data.db', echo=True)
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
from sqlalchemy import Column, Integer, String
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String)
fullname = Column(String)
password = Column(String)
def __repr__(self):
return "<User(name='%s', fullname='%s', password='%s')>" % (self.name, self.fullname, self.password)
print(User.__table__)
Base.metadata.create_all(engine)
from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind=engine)
session = Session()
session_dec = Session()
print(dir(session_dec))
ed_user = User(name='Meenu', fullname='Meenakshi Johri', password='meenuInIndia')
session.add(ed_user)
#Now import the model module and create a new page:
user = User(name='GV',fullname='GV', password='gv@ibm')
print(user.name)
#Add the object to the session:
session.add(user)
print(user.id)
# At this point the test_page object is known to SQLAlchemy, but not to the database.
# To send it to the database, a flush operation can be forced:
session.flush()
print(user.id)
#Now let’s commit the changes:
session.commit()
#SQLAlchemy sends the COMMIT statement that permanently commits the flushed changes and ends the transaction.
# Delete
#To delete the test_page object from the database you would use:
session.delete(user)
session.flush()
#At this point you can either commit the transaction or do a rollback. Let’s do a rollback this time:
session.rollback()
Import the session object from the object_test module:
>>> from object_test import session
Now import the model module and create a new page:
>>> import model
>>> test_page = model.Page()
>>> test_page.title = u'Test Page'
>>> test_page.content = u'Test content'
>>> test_page.title
u'Test Page'
Add the object to the session:
>>> session.add(test_page)
>>> print test_page.id
None
At this point the test_page object is known to SQLAlchemy, but not to the database. To send it to the database, a flush operation can be forced:
>>> session.flush()
>>> print test_page.id
1
Now let’s commit the changes:
>>> session.commit()
SQLAlchemy sends the COMMIT statement that permanently commits the flushed changes and ends the transaction.
Delete
To delete the test_page object from the database you would use:
>>> session.delete(test_page)
>>> session.flush()
At this point you can either commit the transaction or do a rollback. Let’s do a rollback this time:
>>> session.rollback()
SQLAlchemy sends a ROLLBACK statement to the database.
Query
Queries are performed with query objects that are created from the session. The simplest way to create and use a query object is like this:
>>> page_q = session.query(model.Page)
>>> for page in page_q:
... print page.title
Test Page
Try the following statements and observe the SQL queries sent to the database by SQLAlchemy:
>>> page_q.all()
>>> page = page_q.first()
>>> page.title
>>> page_q[2:5]
>>> page_q.get(1)
Working with Objects
Now let’s think about how you could add a comment to a page. One approach would be to insert a new row in the comment table using the SQL Expression API, ensuring that the pageid field contained the value 1 so that the comment was associated with the correct page via a foreign key. The Object-Relational API provides a much better approach:
>>> comment1 = model.Comment()
>>> comment1.name= u'James'
>>> comment1.email = u'james@example.com'
>>> comment1.content = u'This page needs a bit more detail ;-)'
>>> comment2 = model.Comment()
>>> comment2.name = u'Mike'
>>> comment2.email = u'mike@example.com'
>>> page.comments.append(comment1)
>>> page.comments.append(comment2)
>>> session.commit()
The interesting thing to note is that rather than having to manually set each comment's .pageid attribute, you simply appended the comments to the page's .comments attribute. Note also that there was no need to explicitly add the comments to the session; SQLAlchemy was smart enough to realize that they had been appended to an object that was already in the session.
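The model module imported above is not shown here; a minimal sketch of what its Page and Comment classes might look like, assuming a plain one-to-many relationship (class and column names are illustrative guesses, not taken from the original project):

from sqlalchemy import Column, ForeignKey, Integer, String, Text
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Page(Base):
    __tablename__ = 'page'
    id = Column(Integer, primary_key=True)
    title = Column(String(255))
    content = Column(Text)
    # page.comments is the collection appended to above;
    # SQLAlchemy fills in comment.pageid automatically on flush.
    comments = relationship('Comment', backref='page')

class Comment(Base):
    __tablename__ = 'comment'
    id = Column(Integer, primary_key=True)
    pageid = Column(Integer, ForeignKey('page.id'))
    name = Column(String(255))
    email = Column(String(255))
    content = Column(Text)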
def setUp(self):
self._engine = sqlalchemy.create_engine("sqlite:///:memory:")
self._metadata = sqlalchemy.MetaData()
self._metadata.bind = self._engine
self._sports_table = sqlalchemy.Table("sports", self._metadata,
sqlalchemy.Column("id_sports", sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column("name", sqlalchemy.String(100)),
sqlalchemy.Column("weight", sqlalchemy.Float),
sqlalchemy.Column("color", sqlalchemy.String(6)),
sqlalchemy.Column("met", sqlalchemy.Float),
sqlalchemy.Column("max_pace", sqlalchemy.Integer)
)
self._metadata.drop_all()
self._metadata.create_all()
def setUp(self):
self.engine = create_engine('sqlite:///:memory:')
self.session = self._create_session()
self._populate_database()
self.query = self.session.query(User).order_by(asc(User.id))
self.proxy = QueryResultProxy(self.query)
def db_setup(test_subj, dbname=TEST_DB_NAME, dbdump=TEST_DB_DUMP, echo=False):
"""Sets up the db for use by a given test subject.
test_subj must be an instance of DbTestFixture (or an inherited class),
or the class itself. This allows using db_setup by
- unittest setUp (instance method), or
- unittest setUpClass (class method).
"""
try:
pg_createdb(dbname)
except subprocess.CalledProcessError: # try recovering once, in case
pg_dropdb(dbname) # the db already existed
pg_createdb(dbname)
test_subj.dbname = dbname
test_subj.db = sqlalchemy.create_engine(
'postgresql:///' + dbname, echo=echo)
pg_restore(dbname, dbdump)
Session = sqlalchemy.orm.sessionmaker()
test_subj.session = Session(bind=test_subj.db)
from sqlalchemy import create_engine, ForeignKey
from sqlalchemy import Column, Date, Integer, String
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine('sqlite:///school.db', echo=True)
Base = declarative_base()
class School(Base):
__tablename__ = "woot"
id = Column(Integer, primary_key=True)
name = Column(String)
def __init__(self, name):
self.name = name
Base.metadata.create_all(engine)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Python's SQLAlchemy and Declarative
Step2: we will use an in-memory-only SQLite database. To connect we use create_engine()
Step3: Now that we have a “base”, we can define any number of mapped classes in terms of it. We will start with just a single table called users, which will store records for the end-users using our application. A new class called User will be the class to which we map this table. Within the class, we define details about the table to which we’ll be mapping, primarily the table name, and names and datatypes of columns
Step4: A class using Declarative at a minimum needs a __tablename__ attribute, and at least one Column which is part of a primary key [1]. SQLAlchemy never makes any assumptions by itself about the table to which a class refers, including that it has no built-in conventions for names, datatypes, or constraints. But this doesn't mean boilerplate is required; instead, you're encouraged to create your own automated conventions using helper functions and mixin classes, which is described in detail at Mixin and Custom Base Classes.
Step5: Creating a Session
Step6: This custom-made Session class will create new Session objects which are bound to our database. Other transactional characteristics may be defined when calling sessionmaker as well; these are described in a later chapter. Then, whenever you need to have a conversation with the database, you instantiate a Session
Step7: The above Session is associated with our SQLite-enabled Engine, but it hasn’t opened any connections yet. When it’s first used, it retrieves a connection from a pool of connections maintained by the Engine, and holds onto it until we commit all changes and/or close the session object.
Step8: Check examples from 1 to 4
Step10: Other notable examples (Review in your free time)
|
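A compact, self-contained sketch that ties the pieces in this description together -- engine, declarative class, session, and a filtered query; filter_by is standard SQLAlchemy, although it is not used in the code above:

from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite:///:memory:')
Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()

session.add_all([User(name='Meenu'), User(name='GV')])
session.commit()

# Query with a simple filter; results come back as mapped User objects.
for user in session.query(User).filter_by(name='GV'):
    print(user.id, user.name)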
7,395
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import time
import helper
source_path = 'data/letters_source.txt'
target_path = 'data/letters_target.txt'
source_sentences = helper.load_data(source_path)
target_sentences = helper.load_data(target_path)
source_sentences[:50].split('\n')
target_sentences[:50].split('\n')
def extract_character_vocab(data):
special_words = ['<PAD>', '<UNK>', '<GO>', '<EOS>']
set_words = set([character for line in data.split('\n') for character in line])
int_to_vocab = {word_i: word for word_i, word in enumerate(special_words + list(set_words))}
vocab_to_int = {word: word_i for word_i, word in int_to_vocab.items()}
return int_to_vocab, vocab_to_int
# Build int2letter and letter2int dicts
source_int_to_letter, source_letter_to_int = extract_character_vocab(source_sentences)
target_int_to_letter, target_letter_to_int = extract_character_vocab(target_sentences)
# Convert characters to ids
source_letter_ids = [[source_letter_to_int.get(letter, source_letter_to_int['<UNK>']) for letter in line] for line in source_sentences.split('\n')]
target_letter_ids = [[target_letter_to_int.get(letter, target_letter_to_int['<UNK>']) for letter in line] + [target_letter_to_int['<EOS>']] for line in target_sentences.split('\n')]
print("Example source sequence")
print(source_letter_ids[:3])
print("\n")
print("Example target sequence")
print(target_letter_ids[:3])
def pad_id_sequences(source_ids, source_letter_to_int, target_ids, target_letter_to_int, sequence_length):
new_source_ids = [sentence + [source_letter_to_int['<pad>']] * (sequence_length - len(sentence)) \
for sentence in source_ids]
new_target_ids = [sentence + [target_letter_to_int['<pad>']] * (sequence_length - len(sentence)) \
for sentence in target_ids]
return new_source_ids, new_target_ids
# This is the final shape we need them to be in. We can now proceed to building the model.
from distutils.version import LooseVersion
import tensorflow as tf
from tensorflow.python.layers.core import Dense
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Number of Epochs
epochs = 60
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 50
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 15
decoding_embedding_size = 15
# Learning Rate
learning_rate = 0.001
def get_model_inputs():
input_data = tf.placeholder(tf.int32, [None, None], name='input')
targets = tf.placeholder(tf.int32, [None, None], name='targets')
lr = tf.placeholder(tf.float32, name='learning_rate')
target_sequence_length = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
max_target_sequence_length = tf.reduce_max(target_sequence_length, name='max_target_len')
source_sequence_length = tf.placeholder(tf.int32, (None,), name='source_sequence_length')
return input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length
def encoding_layer(input_data, rnn_size, num_layers,
source_sequence_length, source_vocab_size,
encoding_embedding_size):
# Encoder embedding
enc_embed_input = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, encoding_embedding_size)
# RNN cell
def make_cell(rnn_size):
enc_cell = tf.contrib.rnn.LSTMCell(rnn_size,
initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
return enc_cell
enc_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])
enc_output, enc_state = tf.nn.dynamic_rnn(enc_cell, enc_embed_input, sequence_length=source_sequence_length, dtype=tf.float32)
return enc_output, enc_state
# Process the input we'll feed to the decoder
def process_decoder_input(target_data, vocab_to_int, batch_size):
'''Remove the last word id from each batch and concat the <GO> to the begining of each batch'''
ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
dec_input = tf.concat([tf.fill([batch_size, 1], vocab_to_int['<GO>']), ending], 1)
return dec_input
def decoding_layer(target_letter_to_int, decoding_embedding_size, num_layers, rnn_size,
target_sequence_length, max_target_sequence_length, enc_state, dec_input):
# 1. Decoder Embedding
target_vocab_size = len(target_letter_to_int)
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
# 2. Construct the decoder cell
def make_cell(rnn_size):
dec_cell = tf.contrib.rnn.LSTMCell(rnn_size,
initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
return dec_cell
dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])
# 3. Dense layer to translate the decoder's output at each time
# step into a choice from the target vocabulary
output_layer = Dense(target_vocab_size,
kernel_initializer = tf.truncated_normal_initializer(mean = 0.0, stddev=0.1))
# 4. Set up a training decoder and an inference decoder
# Training Decoder
with tf.variable_scope("decode"):
# Helper for the training process. Used by BasicDecoder to read inputs.
training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input,
sequence_length=target_sequence_length,
time_major=False)
# Basic decoder
training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
training_helper,
enc_state,
output_layer)
# Perform dynamic decoding using the decoder
training_decoder_output = tf.contrib.seq2seq.dynamic_decode(training_decoder,
impute_finished=True,
maximum_iterations=max_target_sequence_length)[0]
# 5. Inference Decoder
# Reuses the same parameters trained by the training process
with tf.variable_scope("decode", reuse=True):
start_tokens = tf.tile(tf.constant([target_letter_to_int['<GO>']], dtype=tf.int32), [batch_size], name='start_tokens')
# Helper for the inference process.
inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings,
start_tokens,
target_letter_to_int['<EOS>'])
# Basic decoder
inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
inference_helper,
enc_state,
output_layer)
# Perform dynamic decoding using the decoder
inference_decoder_output = tf.contrib.seq2seq.dynamic_decode(inference_decoder,
impute_finished=True,
maximum_iterations=max_target_sequence_length)[0]
return training_decoder_output, inference_decoder_output
def seq2seq_model(input_data, targets, lr, target_sequence_length,
max_target_sequence_length, source_sequence_length,
source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size,
rnn_size, num_layers):
# Pass the input data through the encoder. We'll ignore the encoder output, but use the state
_, enc_state = encoding_layer(input_data,
rnn_size,
num_layers,
source_sequence_length,
source_vocab_size,
encoding_embedding_size)
# Prepare the target sequences we'll feed to the decoder in training mode
dec_input = process_decoder_input(targets, target_letter_to_int, batch_size)
# Pass encoder state and decoder inputs to the decoders
training_decoder_output, inference_decoder_output = decoding_layer(target_letter_to_int,
decoding_embedding_size,
num_layers,
rnn_size,
target_sequence_length,
max_target_sequence_length,
enc_state,
dec_input)
return training_decoder_output, inference_decoder_output
# Build the graph
train_graph = tf.Graph()
# Set the graph to default to ensure that it is ready for training
with train_graph.as_default():
# Load the model inputs
input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length = get_model_inputs()
# Create the training and inference logits
training_decoder_output, inference_decoder_output = seq2seq_model(input_data,
targets,
lr,
target_sequence_length,
max_target_sequence_length,
source_sequence_length,
len(source_letter_to_int),
len(target_letter_to_int),
encoding_embedding_size,
decoding_embedding_size,
rnn_size,
num_layers)
# Create tensors for the training logits and inference logits
training_logits = tf.identity(training_decoder_output.rnn_output, 'logits')
inference_logits = tf.identity(inference_decoder_output.sample_id, name='predictions')
# Create the weights for sequence_loss
masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
training_logits,
targets,
masks)
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
def pad_sentence_batch(sentence_batch, pad_int):
"""Pad sentences with <PAD> so that each sentence of a batch has the same length"""
max_sentence = max([len(sentence) for sentence in sentence_batch])
return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]
def get_batches(targets, sources, batch_size, source_pad_int, target_pad_int):
"""Batch targets, sources, and the lengths of their sentences together"""
for batch_i in range(0, len(sources)//batch_size):
start_i = batch_i * batch_size
sources_batch = sources[start_i:start_i + batch_size]
targets_batch = targets[start_i:start_i + batch_size]
pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))
# Need the lengths for the _lengths parameters
pad_targets_lengths = []
for target in pad_targets_batch:
pad_targets_lengths.append(len(target))
pad_source_lengths = []
for source in pad_sources_batch:
pad_source_lengths.append(len(source))
yield pad_targets_batch, pad_sources_batch, pad_targets_lengths, pad_source_lengths
# Split data to training and validation sets
train_source = source_letter_ids[batch_size:]
train_target = target_letter_ids[batch_size:]
valid_source = source_letter_ids[:batch_size]
valid_target = target_letter_ids[:batch_size]
(valid_targets_batch, valid_sources_batch, valid_targets_lengths, valid_sources_lengths) = next(get_batches(valid_target, valid_source, batch_size,
source_letter_to_int['<PAD>'],
target_letter_to_int['<PAD>']))
display_step = 20 # Check training loss after every 20 batches
checkpoint = "best_model.ckpt"
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(1, epochs+1):
for batch_i, (targets_batch, sources_batch, targets_lengths, sources_lengths) in enumerate(
get_batches(train_target, train_source, batch_size,
source_letter_to_int['<PAD>'],
target_letter_to_int['<PAD>'])):
# Training step
_, loss = sess.run(
[train_op, cost],
{input_data: sources_batch,
targets: targets_batch,
lr: learning_rate,
target_sequence_length: targets_lengths,
source_sequence_length: sources_lengths})
# Debug message updating us on the status of the training
if batch_i % display_step == 0 and batch_i > 0:
# Calculate validation cost
validation_loss = sess.run(
[cost],
{input_data: valid_sources_batch,
targets: valid_targets_batch,
lr: learning_rate,
target_sequence_length: valid_targets_lengths,
source_sequence_length: valid_sources_lengths})
print('Epoch {:>3}/{} Batch {:>4}/{} - Loss: {:>6.3f} - Validation loss: {:>6.3f}'
.format(epoch_i,
epochs,
batch_i,
len(train_source) // batch_size,
loss,
validation_loss[0]))
# Save Model
saver = tf.train.Saver()
saver.save(sess, checkpoint)
print('Model Trained and Saved')
input_sentence = 'fjeixg'
# Convert the raw string to letter ids so it can be fed to the loaded graph below.
text = [source_letter_to_int.get(letter, source_letter_to_int['<UNK>']) for letter in input_sentence]
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(checkpoint + '.meta')
loader.restore(sess, checkpoint)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('predictions:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
#Multiply by batch_size to match the model's input parameters
answer_logits = sess.run(logits, {input_data: [text]*batch_size,
target_sequence_length: [len(text)]*batch_size,
source_sequence_length: [len(text)]*batch_size})[0]
pad = source_letter_to_int["<PAD>"]
print('Original Text:', input_sentence)
print('\nSource')
print(' Word Ids: {}'.format([i for i in text]))
print(' Input Words: {}'.format(" ".join([source_int_to_letter[i] for i in text])))
print('\nTarget')
print(' Word Ids: {}'.format([i for i in answer_logits if i != pad]))
print(' Response Words: {}'.format(" ".join([target_int_to_letter[i] for i in answer_logits if i != pad])))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's start by examining the current state of the dataset. source_sentences contains the entire input sequence file as text delimited by newline symbols.
Step2: target_sentences contains the entire output sequence file as text delimited by newline symbols. Each line corresponds to the line from source_sentences and contains the sorted characters of that line.
Step3: Preprocess
Step4: The last step in the preprocessing stage is to determine the longest sequence size in the dataset we'll be using, then pad all the sequences to that length.
Step5: Model
Step6: Hyperparameters
Step7: Input
Step8: Sequence to Sequence Model
Step9: 2.2 Decoder
Step10: Set up the decoder components
Step11: 2.3 Seq2seq model
Step12: Model outputs training_decoder_output and inference_decoder_output both contain a 'rnn_output' logits tensor that looks like this
Step15: Get Batches
Step16: Train
Step17: Prediction
|
7,396
|
<ASSISTANT_TASK:>
Python Code:
from pysap.SAPCAR import *
from IPython.display import display
with open("some_file", "w") as fd:
fd.write("Some string to compress")
f0 = SAPCARArchive("archive_file.car", mode="wb", version=SAPCAR_VERSION_200)
f0.add_file("some_file")
f0._sapcar.canvas_dump()
f0._sapcar.files0[0].canvas_dump()
f0._sapcar.files0[0].blocks[0].canvas_dump()
f0._sapcar.files0[0].blocks[0].compressed.canvas_dump()
f1 = SAPCARArchive("archive_file.car", mode="wb", version=SAPCAR_VERSION_201)
f1.add_file("some_file")
f1._sapcar.canvas_dump()
f1._sapcar.files1[0].canvas_dump()
f1._sapcar.files1[0].blocks[0].canvas_dump()
f1._sapcar.files1[0].blocks[0].compressed.canvas_dump()
from os import remove
remove("some_file")
remove("archive_file.car")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: SAPCAR Archive version 2.00
Step2: The file is comprised of the following main structures
Step3: SAPCAR Entry Header
Step4: SAPCAR Data Block
Step5: SAPCAR Compressed Data
Step6: SAPCAR Archive version 2.01
Step7: The file is comprised of the following main structures
Step8: SAPCAR Entry Header
Step9: SAPCAR Data Block
Step10: SAPCAR Compressed data
|
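To read the archives back, the same SAPCARArchive class can be reused before the final cleanup cell (which deletes archive_file.car) is run; a minimal sketch, assuming mode="rb" opens an existing archive for reading (the canvas_dump call mirrors the ones used above):

from pysap.SAPCAR import SAPCARArchive

# Re-open the archive written above and dump its packet structure.
f = SAPCARArchive("archive_file.car", mode="rb")
f._sapcar.canvas_dump()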
7,397
|
<ASSISTANT_TASK:>
Python Code:
# we need this for later:
%matplotlib inline
import json
INPUT_FILE = "all_the_data.json"
with open(INPUT_FILE, "r") as big_data_fd:
big_data = json.load(big_data_fd)
print "We have {} posts".format(len(big_data))
import os
print "The source file is {} bytes. Pathetic.".format(os.stat(INPUT_FILE).st_size)
import itertools
all_the_fields = set(itertools.chain.from_iterable(big_data))
print "We have {} different field names:".format(len(all_the_fields))
print all_the_fields
import random
import pprint
# re-run this as much as you like to inspect different items
pprint.pprint(random.choice(big_data))
pprint.pprint(big_data[234])
pprint.pprint(big_data[234]['comments'][0]['data'][1]['message_tags'])
set([len(data['comments']) for data in big_data if 'comments' in data])
multi_item_comment_lists = [data['comments'] for data in big_data if ('comments' in data) and (len(data['comments']) > 1)]
print len(multi_item_comment_lists)
pprint.pprint(multi_item_comment_lists[0])
def flatten_comments_pages(post):
flattened_comments = []
for page in post:
flattened_comments += page['data']
return flattened_comments
post_comments_paged = multi_item_comment_lists[0]
print "Post has {} comments".format(len(flatten_comments_pages(post_comments_paged)))
comments_threads = [data['comments'] for data in big_data if 'comments' in data]
count_of_posts_with_no_comments = len(big_data) - len(comments_threads)
comments_counts = [0] * count_of_posts_with_no_comments
comments_counts += [len(flatten_comments_pages(thread)) for thread in comments_threads]
import matplotlib.pyplot as plt
plt.hist(comments_counts, bins=max(comments_counts))
plt.title("Comments-per-post Histogram")
plt.xlabel("Comments per post")
plt.ylabel("Frequency")
plt.show()
likes_threads = [data['likes']['data'] for data in big_data if 'likes' in data]
count_of_posts_with_no_likes = len(big_data) - len(likes_threads)
likes_counts = [0] * count_of_posts_with_no_likes
likes_counts += [len(thread) for thread in likes_threads]
plt.hist(likes_counts, bins=max(likes_counts))
plt.title("Likes-per-post Histogram")
plt.xlabel("Likes per post")
plt.ylabel("Frequency")
plt.show()
import datetime
import dateutil.parser
import pytz
def epoch_utc_s(date_string):
dt_local = dateutil.parser.parse(str(date_string))
dt_utc = dt_local.astimezone(pytz.utc)
nineteenseventy = datetime.datetime(1970,1,1)
epoch_utc = dt_utc.replace(tzinfo=None) - nineteenseventy
return int(epoch_utc.total_seconds())
posts_without_likes = [data for data in big_data if 'likes' not in data]
posts_with_likes = [data for data in big_data if 'likes' in data]
timestamps_of_posts_without_likes = [epoch_utc_s(post['created_time']) for post in posts_without_likes]
timestamps_of_posts_with_likes = [epoch_utc_s(post['created_time']) for post in posts_with_likes]
import numpy
median_epoch_liked = int(numpy.median(timestamps_of_posts_with_likes))
median_epoch_non_liked = int(numpy.median(timestamps_of_posts_without_likes))
print "Median timestamp of posts without likes: {} ({})".format(datetime.datetime.fromtimestamp(median_epoch_non_liked),
median_epoch_non_liked)
print "Median timestamp of posts with likes: {} ({})".format(datetime.datetime.fromtimestamp(median_epoch_liked),
median_epoch_liked)
plt.hist(timestamps_of_posts_without_likes, alpha=0.5, label='non-Liked posts')
plt.hist(timestamps_of_posts_with_likes, alpha=0.5, label='Liked posts')
plt.title("Liked vs non-Liked posts")
plt.xlabel("Time (epoch UTC s)")
plt.ylabel("Count of posts")
plt.legend(loc='upper left')
plt.show()
def less_than_n_days_ago(date_string, n):
query_date = epoch_utc_s(date_string)
today_a_year_ago = epoch_utc_s(datetime.datetime.now(pytz.utc) - datetime.timedelta(days=n))
return query_date > today_a_year_ago
# try changing this variable then re-running this cell...
days_ago = 30
# create a slice of our big_data containing only posts created n days ago
recent_data = [data for data in big_data if less_than_n_days_ago(data['created_time'], days_ago)]
# plot the Likes-per-post Histogram for recent_data
recent_likes_threads = [data['likes']['data'] for data in recent_data if 'likes' in data]
recent_count_of_posts_with_no_likes = len(recent_data) - len(recent_likes_threads)
recent_likes_counts = [0] * recent_count_of_posts_with_no_likes
recent_likes_counts += [len(thread) for thread in recent_likes_threads]
plt.hist(recent_likes_counts, bins=max(recent_likes_counts))
plt.title("Likes-per-post Histogram (last {} days)".format(days_ago))
plt.xlabel("Likes per post")
plt.ylabel("Frequency")
plt.show()
# plot the Comment-per-post Histogram for recent_data
recent_comments_threads = [data['comments'] for data in recent_data if 'comments' in data]
recent_count_of_posts_with_no_comments = len(recent_data) - len(recent_comments_threads)
recent_comments_counts = [0] * recent_count_of_posts_with_no_comments
recent_comments_counts += [len(flatten_comments_pages(thread)) for thread in recent_comments_threads]
plt.hist(recent_comments_counts, bins=max(recent_comments_counts))
plt.title("Comments-per-post Histogram (last {} days)".format(days_ago))
plt.xlabel("Comments per post")
plt.ylabel("Frequency")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Is it big enough?
Step2: Wow! So data! Very big!
Step3: At the time this was written, the file was just about 3MB, and there were fewer than 2k posts... note that excludes comments made on posts, but still, this stuff is small. It is small enough that at no point do we need to do anything clever from a data indexing/caching/storage perspective, so to start we will take the simplistic but often appropriate approach of slicing and dicing our big_data object directly. Later on we'll get into pandas DataFrame objects.
Step4: Are we missing anything? A good way to sanity check things is to actually inspect the data, so let's look at a random item
Step5: From that you should be able to sense that we are missing some things - it isn't simply that there are some number of fields that describe each item, because some of those fields have data hierarchies beneath them, for example
Step6: From that we can see some fields have hierarchies within them, e.g. likes have a list of id dictionaries, which happen to be relatively trivial (names and ids... I wonder why Facebook didn't just post the id and make you look up the name?) but the comment field is a bit more complex, wherein it contains a list of dictionaries with each field potentially being a dictionary of its own, e.g. we can see that the second comment on that post tagged Teuku Faruq
Step7: Data quality annoyances
Step8: Apparently that's not always the case, sometimes there are 2 items in the list, let's see what that looks like...
Step9: Skimming the above it looks as though very long comment threads are split into multiple "pages" in the comments list. This may be an artifact of the paging code in pull_feed.py, which is not ideal. At some point we may fix it there, but for the time being we'll just consider it a data quality inconvenience that we will have to deal with.
Step10: Start plotting things already dammit
Step11: This sort of adds up intuitively; posts with long comment threads will be rare, though from experience with this forum it does not seem right to conclude that there is a lot of posting going on with no interaction... the community is a bit more engaged than that.
Step12: Note that the above does not include Likes on Comments made on posts; only Likes made on posts themselves are counted.
Step13: In general it seems my hunch may have been right, but it will be clearer if we plot it.
Step14: This is looking pretty legit now. We can see that lately there's been a significant uptick in the number of posts, and an uptick in the ratio of posts that receive at least one Like.
|
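As a bridge to the pandas DataFrame approach mentioned at the start, a minimal sketch that tabulates per-post counts from big_data (the column names are our own, and flatten_comments_pages is the helper defined earlier):

import pandas as pd

rows = []
for post in big_data:
    rows.append({
        'id': post.get('id'),
        'created_time': post.get('created_time'),
        'n_likes': len(post['likes']['data']) if 'likes' in post else 0,
        'n_comments': len(flatten_comments_pages(post['comments'])) if 'comments' in post else 0,
    })
posts_df = pd.DataFrame(rows)
print posts_df.describe()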
7,398
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
tf.logging.set_verbosity(tf.logging.ERROR)
# Data sets
# The Iris data set contains 150 rows of data, comprising 50 samples from each
# of three related Iris species: Iris setosa, Iris virginica, and Iris versicolor.
# Each row contains the following data for each flower sample: sepal length, sepal
# width, petal length, petal width, and flower species.
# Flower species are represented as integers, with 0 denoting Iris setosa,
# 1 denoting Iris versicolor, and 2 denoting Iris virginica.
IRIS_TRAINING = "data/iris_training.csv"
IRIS_TEST = "data/iris_test.csv"
# Load datasets.
# Datasets in tf.contrib.learn are named tuples; you can access feature data
# and target values via the data and target fields.
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(filename=IRIS_TRAINING,
target_dtype=np.int,
features_dtype=np.float,
target_column=-1)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(filename=IRIS_TEST,
target_dtype=np.int,
features_dtype=np.float,
target_column=-1)
# tf.contrib.learn offers a variety of predefined models, called Estimators,
# which you can use "out of the box" to run training and evaluation operations on
# your data.
# Here, we'll configure a Deep Neural Network Classifier model to fit
# the Iris data.
# Specify that all features have real-value data
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]
# Build 3 layer DNN with 10, 20, 10 units respectively.
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
model_dir="/tmp/iris_model",
enable_centered_bias=True)
# Fit model.
classifier.fit(x=training_set.data,
y=training_set.target,
steps=2000)
# Evaluate accuracy.
accuracy_score = classifier.evaluate(x=test_set.data,
y=test_set.target)["accuracy"]
print('Accuracy: {0:f}'.format(accuracy_score))
print('\n')
# Classify two new flower samples.
new_samples = np.array(
[[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=float)
y = list(classifier.predict(new_samples, as_iterable=True))
print('Predictions: {} {}'.format(str(y[0]), str(y[1])))
import tensorflow.contrib.learn.python.learn as learn
# sklearn integration
from sklearn import datasets, metrics
iris = datasets.load_iris()
feature_columns = learn.infer_real_valued_columns_from_input(iris.data)
classifier = learn.LinearClassifier(n_classes=3, feature_columns=feature_columns)
classifier.fit(iris.data, iris.target, steps=200, batch_size=32)
iris_predictions = list(classifier.predict(iris.data, as_iterable=True))
score = metrics.accuracy_score(iris.target, iris_predictions)
print("Accuracy: %f" % score)
import tensorflow.contrib.learn.python.learn as learn
from sklearn import datasets, metrics, preprocessing
boston = datasets.load_boston()
x = preprocessing.StandardScaler().fit_transform(boston.data)
feature_columns = learn.infer_real_valued_columns_from_input(x)
regressor = learn.LinearRegressor(feature_columns=feature_columns)
regressor.fit(x, boston.target, steps=200, batch_size=32)
boston_predictions = list(regressor.predict(x, as_iterable=True))
score = metrics.mean_squared_error(boston_predictions, boston.target)
print ("MSE: %f" % score)
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
import tensorflow.contrib.layers.python.layers as layers
import tensorflow.contrib.learn.python.learn as learn
iris = datasets.load_iris()
def my_model(features, labels):
"""DNN with three hidden layers."""
# Convert the labels to a one-hot tensor of shape (length of features, 3) and
# with a on-value of 1 for each one-hot vector of length 3.
labels = tf.one_hot(labels, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10.
features = layers.stack(features, layers.fully_connected, [10, 20, 10])
# Create two tensors respectively for prediction and loss.
prediction, loss = (
tf.contrib.learn.models.logistic_regression(features, labels)
)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(iris.data, iris.target, steps=1000)
y_predicted = [p['class'] for p in classifier.predict(iris.data, as_iterable=True)]
score = metrics.accuracy_score(iris.target, y_predicted)
print('Accuracy: {0:f}'.format(score))
labels = [0,1,3,1,1,0,2,2]
sess = tf.Session()
print(sess.run(tf.one_hot(labels, 4, 1, 0)))
sess.close()
'''
Trains a simple deep NN on the MNIST dataset.
You can get to 98.40% test accuracy after 20 epochs.
'''
from __future__ import print_function
import tensorflow as tf
import numpy as np
tf.reset_default_graph()
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.utils import np_utils
batch_size = 128
nb_classes = 10
nb_epoch = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
# print model characteristics
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(X_train,
Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
verbose=1,
validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('\n')
print('Test score:', score[0])
print('Test accuracy:', score[1])
from keras.models import Sequential
from keras.layers import Dense, Activation
model = Sequential()
model.add(Dense(1, input_dim=784, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# generate dummy data
import numpy as np
data = np.random.random((1000, 784))
labels = np.random.randint(2, size=(1000, 1))
# train the model, iterating on the data in batches
# of 32 samples
model.fit(data, labels, nb_epoch=10, batch_size=32)
model.summary()
from keras.layers import Merge
left_branch = Sequential()
left_branch.add(Dense(32, input_dim=784))
right_branch = Sequential()
right_branch.add(Dense(32, input_dim=784))
merged = Merge([left_branch, right_branch], mode='concat')
model = Sequential()
model.add(merged)
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# generate dummy data
import numpy as np
from keras.utils.np_utils import to_categorical
data_1 = np.random.random((1000, 784))
data_2 = np.random.random((1000, 784))
# these are integers between 0 and 9
labels = np.random.randint(10, size=(1000, 1))
# we convert the labels to a binary matrix of size (1000, 10)
# for use with categorical_crossentropy
labels = to_categorical(labels, 10)
# train the model
# note that we are passing a list of Numpy arrays as training data
# since the model has 2 inputs
model.fit([data_1, data_2], labels, nb_epoch=10, batch_size=32)
model.summary()
'''Train a Siamese MLP on pairs of digits from the MNIST dataset.
It follows Hadsell-et-al.'06 [1] by computing the Euclidean distance on the
output of the shared network and by optimizing the contrastive loss (see paper
for mode details).
[1] "Dimensionality Reduction by Learning an Invariant Mapping"
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
Gets to 99.5% test accuracy after 20 epochs.
3 seconds per epoch on a Titan X GPU
'''
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import tensorflow as tf
np.random.seed(1337) # for reproducibility
tf.reset_default_graph()
import random
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Input, Lambda
from keras.optimizers import RMSprop
from keras import backend as K
def euclidean_distance(vects):
x, y = vects
return K.sqrt(K.sum(K.square(x - y), axis=1, keepdims=True))
def eucl_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0], 1)
def contrastive_loss(y_true, y_pred):
'''Contrastive loss from Hadsell-et-al.'06
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
'''
margin = 1
return K.mean(y_true * K.square(y_pred) + (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))
def create_pairs(x, digit_indices):
'''Positive and negative pair creation.
Alternates between positive and negative pairs.
'''
pairs = []
labels = []
n = min([len(digit_indices[d]) for d in range(10)]) - 1
for d in range(10):
for i in range(n):
z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
pairs += [[x[z1], x[z2]]]
inc = random.randrange(1, 10)
dn = (d + inc) % 10
z1, z2 = digit_indices[d][i], digit_indices[dn][i]
pairs += [[x[z1], x[z2]]]
labels += [1, 0]
return np.array(pairs), np.array(labels)
def create_base_network(input_dim):
'''
Base network to be shared (eq. to feature extraction).
'''
seq = Sequential()
seq.add(Dense(128, input_shape=(input_dim,), activation='relu'))
seq.add(Dropout(0.1))
seq.add(Dense(128, activation='relu'))
seq.add(Dropout(0.1))
seq.add(Dense(2, activation=None,name='emb'))
return seq
def compute_accuracy(predictions, labels):
'''Compute classification accuracy with a fixed threshold on distances.
'''
return labels[predictions.ravel() < 0.5].mean()
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
input_dim = 784
nb_epoch = 10
# create training+test positive and negative pairs
digit_indices = [np.where(y_train == i)[0] for i in range(10)]
tr_pairs, tr_y = create_pairs(X_train, digit_indices)
digit_indices = [np.where(y_test == i)[0] for i in range(10)]
te_pairs, te_y = create_pairs(X_test, digit_indices)
# network definition
base_network = create_base_network(input_dim)
input_a = Input(shape=(input_dim,))
input_b = Input(shape=(input_dim,))
# because we re-use the same instance `base_network`,
# the weights of the network
# will be shared across the two branches
processed_a = base_network(input_a)
processed_b = base_network(input_b)
distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([processed_a, processed_b])
model = Model(input=[input_a, input_b], output=distance)
# train
rms = RMSprop()
model.compile(loss=contrastive_loss, optimizer=rms)
model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y),
batch_size=128,
nb_epoch=nb_epoch)
# compute final accuracy on training and test sets
pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
tr_acc = compute_accuracy(pred, tr_y)
pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
te_acc = compute_accuracy(pred, te_y)
print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
from keras.layers import Input, Embedding, LSTM, Dense, merge
from keras.models import Model
# headline input: meant to receive sequences of 100 integers, between 1 and 10000.
# note that we can name any layer by passing it a "name" argument.
main_input = Input(shape=(100,), dtype='int32', name='main_input')
# this embedding layer will encode the input sequence
# into a sequence of dense 512-dimensional vectors.
x = Embedding(output_dim=512, input_dim=10000, input_length=100)(main_input)
# a LSTM will transform the vector sequence into a single vector,
# containing information about the entire sequence
lstm_out = LSTM(32)(x)
auxiliary_output = Dense(1, activation='sigmoid', name='aux_output')(lstm_out)
auxiliary_input = Input(shape=(5,), name='aux_input')
x = merge([lstm_out, auxiliary_input], mode='concat')
# we stack a deep fully-connected network on top
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
# and finally we add the main logistic regression layer
main_output = Dense(1, activation='sigmoid', name='main_output')(x)
model = Model(input=[main_input, auxiliary_input], output=[main_output, auxiliary_output])
model.compile(optimizer='rmsprop', loss='binary_crossentropy',
loss_weights=[1., 0.2])
from keras.layers import Input, LSTM, Dense, merge
from keras.models import Model
tweet_a = Input(shape=(140, 256))
tweet_b = Input(shape=(140, 256))
# this layer can take as input a matrix
# and will return a vector of size 64
shared_lstm = LSTM(64)
# when we reuse the same layer instance
# multiple times, the weights of the layer
# are also being reused
# (it is effectively *the same* layer)
encoded_a = shared_lstm(tweet_a)
encoded_b = shared_lstm(tweet_b)
# we can then concatenate the two vectors:
merged_vector = merge([encoded_a, encoded_b], mode='concat', concat_axis=-1)
# and add a logistic regression on top
predictions = Dense(1, activation='sigmoid')(merged_vector)
# we define a trainable model linking the
# tweet inputs to the predictions
model = Model(input=[tweet_a, tweet_b], output=predictions)
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
model.fit([data_a, data_b], labels, nb_epoch=10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple linear classifier
Step2: Simple linear regression.
Step4: Custom model
Step5: keras
Step6: keras sequential mode
Step7: For a multi-input model with 10 classes
Step8: Keras functional API
Step9: Multi-input and multi-output models
Step10: Here we insert the auxiliary loss, allowing the LSTM and Embedding layer to be trained smoothly even though the main loss will be much higher in the model.
Step11: At this point, we feed into the model our auxiliary input data by concatenating it with the LSTM output
Step12: This defines a model with two inputs and two outputs
Step13: We compile the model and assign a weight of 0.2 to the auxiliary loss. To specify different loss_weights or loss for each different output, you can use a list or a dictionary. Here we pass a single loss as the loss argument, so the same loss will be used on all outputs.
Step14: We can train the model by passing it lists of input arrays and target arrays
Step15: To share a layer across different inputs, simply instantiate the layer once, then call it on as many inputs as you want
|
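As noted in Step 13, loss and loss_weights can also be passed as dictionaries keyed by output-layer names; a minimal sketch for the two-output model above, where headline_data, additional_data and labels stand in for prepared Numpy arrays:

model.compile(optimizer='rmsprop',
              loss={'main_output': 'binary_crossentropy',
                    'aux_output': 'binary_crossentropy'},
              loss_weights={'main_output': 1., 'aux_output': 0.2})

# Inputs and targets can then be passed as dictionaries keyed the same way:
# model.fit({'main_input': headline_data, 'aux_input': additional_data},
#           {'main_output': labels, 'aux_output': labels},
#           nb_epoch=50, batch_size=32)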
7,399
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from pandas import DataFrame as df

#projection 2014+
pop_projection = df.from_csv("NP2014_D1.csv", index_col='year')
pop_projection = pop_projection[(pop_projection.sex == 0) & (pop_projection.race == 0) & (pop_projection.origin == 0)]
pop_projection = pop_projection.drop(['sex', 'race', 'origin'], axis=1)
pop_projection = pop_projection.drop(pop_projection.index[11:], axis=0)
pop_projection = pop_projection.drop(pop_projection.index[:1], axis=0)
#estimates 2010-2014
historical1 = pd.read_csv("NC-EST2014-AGESEX-RES.csv")
historical1 = historical1[historical1.SEX == 0]
historical1 = historical1.drop(['SEX', 'CENSUS2010POP', 'ESTIMATESBASE2010'],axis = 1)
pop_dep1 = historical1[historical1.AGE<=19].sum()
pop_dep1 = pop_dep1.drop(['AGE'],axis = 0)
pop_snr1 = historical1[(historical1.AGE>=65)&(historical1.AGE<999)].sum()
pop_snr1 = pop_snr1.drop(['AGE'], axis = 0)
total_pop1 = historical1[historical1.AGE==999]
total_pop1 = total_pop1.drop(['AGE'], axis = 1)
#estimates 2008-2009
historical2 = pd.read_csv("US-EST00INT-ALLDATA.csv")
historical2 = historical2[(historical2.MONTH==7)&(historical2.YEAR>=2008)&(historical2.YEAR<2010)]
historical2 = historical2.drop(historical2.columns[4:],axis = 1)
historical2 = historical2.drop(historical2.columns[0],axis = 1)
pop_dep2 = [historical2.TOT_POP[(historical2.YEAR ==2008) & (historical2.AGE <=19)].sum(),historical2.TOT_POP[(historical2.YEAR ==2009) & (historical2.AGE <=19)].sum()]
pop_snr2 = [historical2.TOT_POP[(historical2.YEAR ==2008) & (historical2.AGE >=65) & (historical2.AGE < 999)].sum(), historical2.TOT_POP[(historical2.YEAR ==2009) & (historical2.AGE >=65) & (historical2.AGE < 999)].sum()]
total_pop2 = [historical2.TOT_POP[(historical2.YEAR ==2008) & (historical2.AGE == 999)].sum(), historical2.TOT_POP[(historical2.YEAR ==2009) & (historical2.AGE == 999)].sum() ]
POP_DEP = pd.concat([df(pop_dep2),df(pop_dep1),df(pop_projection[pop_projection.columns[1:21]].sum(axis = 1))])
POP_SNR = pd.concat([df(pop_snr2),df(pop_snr1),df(pop_projection[pop_projection.columns[66:]].sum(axis = 1))])
TOTAL_POP = pd.concat([df(total_pop2), df(total_pop1.values.transpose()),df(pop_projection.total_pop.values)])
Stage_II_targets = df(TOTAL_POP)
Stage_II_targets.columns = ['TOTAL_POP']
Stage_II_targets['POP_DEP'] = POP_DEP.values
Stage_II_targets['POP_SNR'] = POP_SNR.values
index = list(range(2008,2025))
Stage_II_targets.index = index
APOPN = Stage_II_targets.TOTAL_POP/Stage_II_targets.TOTAL_POP[2008]
Stage_I_factors = df(APOPN, index = index)
Stage_I_factors.columns = ['APOPN']
Stage_I_factors['APOPDEP'] = df(Stage_II_targets.POP_DEP/Stage_II_targets.POP_DEP[2008],index = index)
Stage_I_factors['APOPSNR'] = df(Stage_II_targets.POP_SNR/Stage_II_targets.POP_SNR[2008],index = index)
pop_growth_rates = df(Stage_II_targets.TOTAL_POP.pct_change()+1)
pop_growth_rates['POPDEP'] = Stage_II_targets.POP_DEP.pct_change()+1
pop_growth_rates['POPSNR'] = Stage_II_targets.POP_SNR.pct_change()+1
pop_growth_rates = pop_growth_rates.drop(pop_growth_rates.index[0],axis = 0)
cbo_baseline = (df.from_csv("CBO_baseline.csv", index_col=0)).transpose()
cbo_baseline.index = index
Stage_I_factors['AGDPN'] = df(cbo_baseline.GDP/cbo_baseline.GDP[2008], index = index)
Stage_I_factors['ATXPY'] = df(cbo_baseline.TPY/cbo_baseline.TPY[2008], index = index)
Stage_I_factors['ASCHF'] = df(cbo_baseline.SCHF/cbo_baseline.SCHF[2008], index = index)
Stage_I_factors['ABOOK'] = df(cbo_baseline.BOOK/cbo_baseline.BOOK[2008], index = index)
Stage_I_factors['ACPIU'] = df(cbo_baseline.CPIU/cbo_baseline.CPIU[2008], index = index)
Stage_I_factors['ACPIM'] = df(cbo_baseline.CPIM/cbo_baseline.CPIM[2008], index = index)
cbo_growth_rates = cbo_baseline.pct_change()+1
cbo_growth_rates = cbo_growth_rates.drop(cbo_growth_rates.index[0], axis=0)
irs_returns = (df.from_csv("IRS_return_projection.csv", index_col=0)).transpose()
return_growth_rate = irs_returns.pct_change()+1
return_growth_rate.Returns['2023'] = return_growth_rate.Returns['2022']
return_growth_rate.Returns['2024'] = return_growth_rate.Returns['2022']
return_growth_rate.Returns.index = index
soi_estimates = (df.from_csv("SOI_estimates.csv", index_col=0)).transpose()
historical_index = list(range(2008,2013))
soi_estimates.index = historical_index
return_projection = soi_estimates
for i in range(2012,2024):
Single = return_projection.Single[i]*return_growth_rate.Returns[i+1]
Joint = return_projection.Joint[i]*return_growth_rate.Returns[i+1]
HH = return_projection.HH[i]*return_growth_rate.Returns[i+1]
SS_return = return_projection.SS_return[i]*pop_growth_rates.POPSNR[i+1]
Dep_return = return_projection.Dep_return[i]*pop_growth_rates.POPDEP[i+1]
INTS = return_projection.INTS[i]*cbo_growth_rates.INTS[i+1]
DIVS = return_projection.DIVS[i]*cbo_growth_rates.DIVS[i+1]
SCHCI = return_projection.SCHCI[i]*cbo_growth_rates.SCHC[i+1]
SCHCL = return_projection.SCHCL[i]*cbo_growth_rates.SCHC[i+1]
CGNS = return_projection.CGNS[i]*cbo_growth_rates.CGNS[i+1]
Pension = return_projection.Pension[i]*cbo_growth_rates.TPY[i+1]
SCHEI = return_projection.SCHEI[i]*cbo_growth_rates.BOOK[i+1]
SCHEL = return_projection.SCHEL[i]*cbo_growth_rates.BOOK[i+1]
SS = return_projection.SS[i]*cbo_growth_rates.SOCSEC[i+1]
UCOMP = return_projection.UCOMP[i]*cbo_growth_rates.UCOMP[i+1]
Wage_1 = return_projection.WAGE_1[i]*cbo_growth_rates.Wages[i+1]
Wage_2 = return_projection.WAGE_2[i]*cbo_growth_rates.Wages[i+1]
Wage_3 = return_projection.WAGE_3[i]*cbo_growth_rates.Wages[i+1]
Wage_4 = return_projection.WAGE_4[i]*cbo_growth_rates.Wages[i+1]
Wage_5 = return_projection.WAGE_5[i]*cbo_growth_rates.Wages[i+1]
Wage_6 = return_projection.WAGE_6[i]*cbo_growth_rates.Wages[i+1]
Wage_7 = return_projection.WAGE_7[i]*cbo_growth_rates.Wages[i+1]
Wage_8 = return_projection.WAGE_8[i]*cbo_growth_rates.Wages[i+1]
Wage_9 = return_projection.WAGE_9[i]*cbo_growth_rates.Wages[i+1]
Wage_10 = return_projection.WAGE_10[i]*cbo_growth_rates.Wages[i+1]
Wage_11 = return_projection.WAGE_11[i]*cbo_growth_rates.Wages[i+1]
Wage_12 = return_projection.WAGE_12[i]*cbo_growth_rates.Wages[i+1]
current_year = df([Single, Joint, HH,
SS_return,Dep_return,INTS,DIVS,SCHCI,SCHCL,
CGNS,Pension, SCHEI, SCHEL,SS,UCOMP,Wage_1,
Wage_2,Wage_3,Wage_4,Wage_5,Wage_6,Wage_7,
Wage_8, Wage_9, Wage_10, Wage_11, Wage_12]).transpose()
current_year.columns = return_projection.columns
current_year.index = [i+1]
return_projection = return_projection.append(current_year)
Stage_II_targets = pd.concat([Stage_II_targets,return_projection], axis = 1)
total_return = df(Stage_II_targets[Stage_II_targets.columns[3:6]].sum(axis = 1), columns=['ARETS'])
Stage_I_factors['ARETS'] = total_return/total_return.ARETS[2008]
total_wage = df(Stage_II_targets[Stage_II_targets.columns[18:30]].sum(axis = 1), columns=['AWAGE'])
Stage_I_factors['AWAGE'] = total_wage/total_wage.AWAGE[2008]
Stage_I_factors['ASCHCI'] = Stage_II_targets.SCHCI/Stage_II_targets.SCHCI[2008]
Stage_I_factors['ASCHCL'] = Stage_II_targets.SCHCL/Stage_II_targets.SCHCL[2008]
Stage_I_factors['ASCHEI'] = Stage_II_targets.SCHEI/Stage_II_targets.SCHEI[2008]
Stage_I_factors['ASCHEL'] = Stage_II_targets.SCHEL/Stage_II_targets.SCHEL[2008]
Stage_I_factors['AINTS'] = Stage_II_targets.INTS/Stage_II_targets.INTS[2008]
Stage_I_factors['ADIVS'] = Stage_II_targets.DIVS/Stage_II_targets.DIVS[2008]
Stage_I_factors['ACGNS'] = Stage_II_targets.CGNS/Stage_II_targets.CGNS[2008]
Stage_I_factors['ASOCSEC'] = Stage_II_targets.SS/Stage_II_targets.SS[2008]
Stage_I_factors['AUCOMP'] = Stage_II_targets.UCOMP/Stage_II_targets.UCOMP[2008]
#pd.options.display.float_format = '{:,.4f}'.format
Stage_I_factors.to_csv(path_or_buf = "Stage_I_factors.csv", float_format ='%.4f')
Stage_I_factors = Stage_I_factors.transpose()
Stage_I_factors.to_csv(path_or_buf = "../Stage II/Stage_I_factors.csv", float_format ='%.4f')
Stage_II_targets = Stage_II_targets.transpose()
Stage_II_targets.to_csv(path_or_buf = "../Stage II/Stage_II_targets.csv", float_format = '%.4f')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import CBO baseline
Step2: Import IRS number of returns projection
Step3: Import SOI estimates (2008 - 2012)
|