| Unnamed: 0 (int64, 0 to 15.9k) | cleaned_code (string, 67 to 124k chars) | cleaned_prompt (string, 168 to 30.3k chars) |
|---|---|---|
4,300
|
<ASSISTANT_TASK:>
Python Code:
solution = 0
N = 0
while N < 1000:
    if (N % 3 == 0) or (N % 5 == 0):
        solution = solution + N
    N += 1
print(solution)
s = 0
a = 1
b = 1
while b < 4E6:
    if b % 2 == 0:
        s = s + b
    c = a + b
    a = b
    b = c
print(s)
n = 600851475143  # 13195
a = 2
while not a > n:
    if n % a:
        a += 1
    else:
        n //= a
print(a)
a = 999
b = 999
while a > 100 and b > 100:
    c = a * b
    if str(c) == str(c)[::-1]:
        print(a, "*", b, "=", c)
        break
    a = a - 1
    b = b - 1
# brute force
n = 2520
l = n*11*12*13*14*15*16*17*18*19
while n < l:
    remainder = [n % (i + 1) for i in range(20)]
    if sum(remainder) == 0:
        print(n)
        break
    n = n + 1
# 1*2*3*4*5*7*3=2520
# prime number: must be a factor
# composite number: factorize and remove smaller prime number
def is_prime(n):
    if n < 2:
        return False
    elif n < 4:
        return True
    elif (n % 2 == 0) or (n % 3 == 0):
        return False
    i = 5
    while i*i < n + 1:
        if (n % i == 0):
            return False
        i = i + 1
    # Faster 6k +/- 1 variant:
    # while i*i < n + 1:
    #     if (n % i == 0) or (n % (i + 2) == 0):
    #         return False
    #     i = i + 6
    return True
def prime_factors(n):
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors

fac = []
for i in range(20):
    if is_prime(i + 1):
        fac.append(i + 1)
    else:
        test = fac[:]
        for p in prime_factors(i + 1):
            if p in test:
                test.remove(p)
            else:
                fac.append(p)
print(fac)
product = 1
for x in fac:
    product *= x
print(product)
sum_of_the_square = sum([i*i for i in range(1, 100+1)])
square_of_the_sum = sum(range(1, 100+1))
square_of_the_sum = square_of_the_sum * square_of_the_sum
print(square_of_the_sum - sum_of_the_square)
def is_prime(n):
    if n < 2:
        return False
    elif n < 4:
        return True
    elif (n % 2 == 0) or (n % 3 == 0):
        return False
    i = 5
    while i*i < n + 1:
        if (n % i == 0):
            return False
        i = i + 1
    return True

i = 1
count_prime = 0
while count_prime < 10001:
    i += 1
    if is_prime(i):
        count_prime += 1
print(i, "#", count_prime)
s='''73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450'''
import re
s = re.sub("[^0-9]", "", s)
#ss = [s[i:i+13] for i in range(len(s)-12)]
#print(ss[-1])
maxproduct = 1
for i in range(len(s)-12):
    ss = map(int, s[i:i+13])
    product = 1
    for x in ss:
        product *= x
    if product > maxproduct:
        maxproduct = product
print(maxproduct)
# By substituting c = 1000 - (a+b) into the Pythagorean equality, we get
# n^2 = 2n(a+b) - 2ab, with n = 1000 here
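# Derivation sketch (added for clarity): from a^2 + b^2 = c^2 and
# a + b + c = n, substitute c = n - (a+b):
#   a^2 + b^2 = n^2 - 2n(a+b) + (a+b)^2
#             = n^2 - 2n(a+b) + a^2 + 2ab + b^2
# Cancelling a^2 + b^2 on both sides gives n^2 = 2n(a+b) - 2ab.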
def f(a, b, n):
    return 2*n*(a+b) - 2*a*b - n*n

import numpy as np
n = 1000
X, Y = np.meshgrid(range(1, n), range(1, n))
X = np.tril(X, -1).ravel()
Y = np.tril(Y, -1).ravel()
for p in zip(X[X > 0], Y[Y > 0]):
    if f(p[0], p[1], n) == 0:
        print(p[0], p[1], n - p[0] - p[1])
        print(p[0] * p[1] * (n - p[0] - p[1]))
def is_prime(n):
    if n < 2:
        return False
    elif n < 4:
        return True
    elif (n % 2 == 0) or (n % 3 == 0):
        return False
    i = 5
    while i*i < n + 1:
        if (n % i == 0) or (n % (i + 2) == 0):
            return False
        i = i + 6
    return True

i = 1
s = 0
while i < 2e6:
    i += 1
    if is_prime(i):
        s += i
print(i, "SUM", s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Q002
Step2: Q003
Step3: Q004
Step4: Q005
Step5: Q006
Step6: Q007
Step7: Q008
Step8: Q009
Step9: Q010
|
4,301
|
<ASSISTANT_TASK:>
Python Code:
a = [1, 2, "Hi"] # Создать список и присвоить переменной `а` этот список
print(a[0], a[1], a[2]) # Обращение к элементам списка, индексация с нуля
b = list() # Создать пустой список
c = [] # Другой способ создать пустой список
a = [0] * 10
print(a)
N = 5
b = [True] * N
print(b)
a = [1, 2, "Hi"]
a[1] = 4 # Изменить отдельный элемент списка
print(a) # Вывести весь список
a = [1, 2, "Hi"]
print(a[-1], '=', a[2])
print(a[-2], '=', a[1])
print(a[-3], '=', a[0])
b = ["a", 1, -2, [1, 2, 3], "b", 3.4] # Элементом списка может быть список
print(b[-3])
a = [2, 3]
a.append(5)
print(a)
a = [1, 3, 6]
print(len(a))
a = [3, 2, 4, 1, 2]
a.sort()
print(a)
a = [1, 2, 3]
b = [4, 5, 6]
c = a + b
print(c)
a = [1, 2, 3, 4]  # a is a reference to a list; each list element is a reference to the objects 1, 2, 3, 4
b = a  # b is a reference to the very same list
# In Python every object has its own id (identifier) -
# a unique number associated with the object
print("id(a) = ", id(a))
print("id(b) = ", id(b))
a[0] = -1  # Change an element of list a
print("b =", b)  # The value of b changed too!
a = 2
b = a
print("a =", a, ", id(a) =", id(a))
print("b =", b, ", id(b) =", id(b))
print()
a = 3
print("a =", a, ", id(a) =", id(a))
print("b =", b, ", id(b) =", id(b))
lst = []
if lst:
    print("if branch")
else:
    print("else branch")
lst = []
if not lst:
    print("if branch")
else:
    print("else branch")
lst = [2, 3, 4]
if lst:
    print("if branch")
else:
    print("else branch")
lst = [[]]
if lst:
    print("if branch")
else:
    print("else branch")
lst = [1, 6, 29, 4, "a", 3, -1]
if 3 in lst:
    print("3 is in the list")
else:
    print("3 is not in the list")
lst = [1, 6, 29, 4, "a", 3, -1]
if 5 not in lst:
    print("5 is not in the list")
for i in [2, 3, 5, 7]:
    print(i)
lst = [3, "ads", [1, 2]]
for i in lst:
    print(i)
r = range(5)
print(r[2], r[-1])
s = "1#2#3"
print(s.split("#"))
names = "Artem Irina Zhenya"
print(names.split())
s = "1abc2abcd3"
print(s.split("abc"))
lst = ["1", "2", "3"]
print("#".join(lst))
print("".join(["1", "2", "3"]))
shopping_list = ', '.join(['apples', 'milk', 'flour', 'jam'])
print(shopping_list)
shopping_list = '\n'.join(['apples', 'milk', 'flour', 'jam'])
print(shopping_list)
a = "long string" # у нас есть строка, мы хотим уметь ее изменять
print(a)
l = list(a) # сделаем из строки список символов
print(l)
l[2] = "!" # изменим один элемент списка
print(l)
s = "".join(l)
print(s)
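# A sketch of the slice-based alternative mentioned in Step24:
a = "long string"
s = a[:2] + "!" + a[3:]  # rebuild the string around position 2
print(s)                 # lo!g string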
a, b, c = [2, "abs", 3]  # Assign an explicitly given list
print(a, b, c)
lst = [2, "abs", 3]
a, b, c = lst  # A slightly less trivial way of writing the same thing
print(a, b, c)
lst = [[1, 1], [2, 4], [3, 9], [4, 16]]
for a, b in lst:
    print(a, b)
print()
# The same thing in a more familiar form
for x in lst:
    a, b = x
    print(a, b)
l = [1, 2, -1, 3, 2, -2, 1, 5, 7, 3]
print(min(l))
# min can also take two arguments instead of a list
print(min(12, 10))
print(sum([2, 3, 11, 1]))
l = [1, 2, -1, 3, 2, -2, 1, 5, 7, 3]
print(l.index(3))  # List l contains two threes; index returns the index of the first one
l = [1, 2, -1, 3, 2, -2, 1, 5, 7, 3]
print(l.index(115))  # If the element is not in the list, a runtime error occurs
l = [1, 2, 3]
last_element = l.pop()
print(l)
print(last_element)
l = [1, 2, 4, 5]
l.insert(2, 3)
print(l)
a = [1, 4, 2, 5, 2]
a.reverse()
print(a)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can create a list of same-type elements of a given length
Step2: Accessing elements
Step3: Note that when a list is printed this way, it is shown with square brackets and commas.
Step4: Appending an element to the end
Step5: List length
Step6: Sorting
Step7: Addition (concatenation)
Step8: Peculiarities of list assignment
Step9: To learn how to copy a list, read the page about slices.
Step10: Lists and if
Step11: A non-empty list converts to True
Step12: Checking whether an element is in a list
Step13: There is also the analogous not in operator, which checks that an element is not in the list
Step14: Lists and for
Step15: In a sense, range is also similar to a list
Step16: Lists and strings
Step17: By default (with no arguments) split splits the string on whitespace characters, ignoring consecutive whitespace
Step18: An example of a separator string longer than one character
Step19: Join (combining strings)
Step20: The separator string can be empty
Step21: Note that the separator is placed only between strings, never at the end.
Step22: A newline character as the separator
Step23: Changing a character of a string
Step24: There is also another way to change the third character of a string, using slices.
Step25: In addition, lists can be unpacked directly in a for loop
Step26: Other operations
Step27: Sum
Step28: Index of an element
Step29: ValueError
Step30: Inserting an element
Step31: Reversing a list
|
4,302
|
<ASSISTANT_TASK:>
Python Code:
import os.path as op
import numpy as np
import mne
data_path = mne.datasets.opm.data_path()
subject = 'OPM_sample'
subjects_dir = op.join(data_path, 'subjects')
raw_fname = op.join(data_path, 'MEG', 'OPM', 'OPM_SEF_raw.fif')
bem_fname = op.join(subjects_dir, subject, 'bem',
subject + '-5120-5120-5120-bem-sol.fif')
fwd_fname = op.join(data_path, 'MEG', 'OPM', 'OPM_sample-fwd.fif')
coil_def_fname = op.join(data_path, 'MEG', 'OPM', 'coil_def.dat')
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(None, 90, h_trans_bandwidth=10.)
raw.notch_filter(50., notch_widths=1)
# Set epoch rejection threshold a bit larger than for SQUIDs
reject = dict(mag=2e-10)
tmin, tmax = -0.5, 1
# Find Median nerve stimulator trigger
event_id = dict(Median=257)
events = mne.find_events(raw, stim_channel='STI101', mask=257, mask_type='and')
picks = mne.pick_types(raw.info, meg=True, eeg=False)
# we use verbose='error' to suppress warning about decimation causing aliasing,
# ideally we would low-pass and then decimate instead
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, verbose='error',
reject=reject, picks=picks, proj=False, decim=10,
preload=True)
evoked = epochs.average()
evoked.plot()
cov = mne.compute_covariance(epochs, tmax=0.)
del epochs, raw
bem = mne.read_bem_solution(bem_fname)
trans = None
# To compute the forward solution, we must
# provide our temporary/custom coil definitions, which can be done as::
#
#     with mne.use_coil_def(coil_def_fname):
#         fwd = mne.make_forward_solution(
#             raw.info, trans, src, bem, eeg=False, mindist=5.0,
#             n_jobs=1, verbose=True)
fwd = mne.read_forward_solution(fwd_fname)
# use fixed orientation here just to save memory later
mne.convert_forward_solution(fwd, force_fixed=True, copy=False)
with mne.use_coil_def(coil_def_fname):
    fig = mne.viz.plot_alignment(
        evoked.info, trans, subject, subjects_dir, ('head', 'pial'), bem=bem)
mne.viz.set_3d_view(figure=fig, azimuth=45, elevation=60, distance=0.4,
                    focalpoint=(0.02, 0, 0.04))
# Fit dipoles on a subset of time points
with mne.use_coil_def(coil_def_fname):
    dip_opm, _ = mne.fit_dipole(evoked.copy().crop(0.015, 0.080),
                                cov, bem, trans, verbose=True)
idx = np.argmax(dip_opm.gof)
print('Best dipole at t=%0.1f ms with %0.1f%% GOF'
% (1000 * dip_opm.times[idx], dip_opm.gof[idx]))
# Plot N20m dipole as an example
dip_opm.plot_locations(trans, subject, subjects_dir,
mode='orthoview', idx=idx)
inverse_operator = mne.minimum_norm.make_inverse_operator(
evoked.info, fwd, cov, loose=0., depth=None)
del fwd, cov
method = "MNE"
snr = 3.
lambda2 = 1. / snr ** 2
stc = mne.minimum_norm.apply_inverse(
evoked, inverse_operator, lambda2, method=method,
pick_ori=None, verbose=True)
# Plot source estimate at time of best dipole fit
brain = stc.plot(hemi='rh', views='lat', subjects_dir=subjects_dir,
initial_time=dip_opm.times[idx],
clim=dict(kind='percent', lims=[99, 99.9, 99.99]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare data for localization
Step2: Examine our coordinate alignment for source localization and compute a
Step3: Perform dipole fitting
Step4: Perform minimum-norm localization
|
4,303
|
<ASSISTANT_TASK:>
Python Code:
numero = 78277485
a = numero % 1000; a
b = numero % 100; b
from mpmath import mp  # Library used to modify the decimal precision of pi
from math import sqrt
mp.dps = 1000  # Set the precision we want
pi_1000 = mp.pi
print(pi_1000)
besima = str(pi_1000)[b+2]
print(besima)
aesima = str(pi_1000)[a+2]
print(aesima)
# Python generator that yields the divisors of a number n
def divisores(n):
    if n < 1:
        raise ValueError("The number must be a natural number greater than 0")
    div = []  # Complements of the divisors found
    for numero in range(1, int(sqrt(n) + 1)):
        if n % numero == 0:
            yield numero  # If it is a divisor, yield it
            # Next we check that its square is not n.
            # The complement is always <= n since
            # we only search up to the square root of the number
            if numero*numero != n:
                # If it is not, add the complement as a divisor
                div = div + [int(n/numero)]
    for divisor in div:
        yield divisor
list(divisores(a))
# The sum of the divisors is
sum(list(divisores(a)))
# An example to see how it works with another number
list(divisores(24))
sum(list(divisores(24)))
primos = [2]
# A list to store the primes we find
def es_primo(n):
    if n >= 2:
        for y in range(2, int(sqrt(n)) + 1):
            if not (n % y):
                return False
    else:
        return False
    return True
for i in range(1, 920):
    if es_primo(i):
        primos.append(i)
print(primos)
primos[-1]
from itertools import takewhile, count
[x for x in takewhile(lambda x: not es_primo(x), count(a+1))][-1]+1
from math import factorial
#factorial(1000+a)
from operator import mul
from functools import reduce
def factorial_mio(n):
    return reduce(mul, range(1, n+1))
factorial(1000+a) == factorial_mio(1000+a)
# To count the trailing zeros, the number is converted to a string of characters,
# the digit order is reversed (the last digit becomes the first, and so on with
# the rest), and the string is scanned until a digit other than 0 is found.
total_ceros = 0
distinto_encontrado = False
numero = str(factorial_mio(1000+a))[::-1]
while not distinto_encontrado:
    if numero[total_ceros] == '0':
        total_ceros += 1
    else:
        distinto_encontrado = True
total_ceros
def divisores(n):
    if n < 1:
        raise ValueError("The number must be a natural number greater than 0")
    div = []  # Complements of the divisors found
    for numero in range(1, int(sqrt(n) + 1)):
        if n % numero == 0:
            yield numero  # If it is a divisor, yield it
            # Next we check that its square is not n.
            # The complement is always <= n since
            # we only search up to the square root of the number
            if numero*numero != n:
                # If it is not, add the complement as a divisor
                div = div + [int(n/numero)]
    for divisor in div:
        yield divisor
def suma_divisores(n):
    divis = list(divisores(n))
    # Now remove the number itself
    divis.remove(n)
    return sum(divis)
suma_divisores(24)
[x for x in range(1,1001) if x == suma_divisores(x)]
len([x for x in range(1,1001) if x == suma_divisores(x)])
primos_en_258 = []
for i in range(1, 258):
    if es_primo(i):
        primos_en_258.append(i)
print(primos_en_258)
def LucasLehmer(p):
    """This algorithm determines whether 2**p - 1 is prime, for p >= 2"""
    if p == 2:
        return True
    s = 4
    M = 2**p - 1
    for i in range(3, p+1):
        s = (s**2 - 2) % M
    return s == 0
lista = []
for i in primos_en_258:
    lista.append((i, LucasLehmer(i)))
print(lista)
print([x[0] for x in lista if x[1] == True])
numero = 1000+2*a
print(numero)
def lista_primos(n):
    lista = []
    for i in range(1, n+1):
        if es_primo(i):
            lista.append(i)
    return lista
print(lista_primos(numero))
def suma_primos(numero):
    primos = lista_primos(numero)
    indice_i = 0
    indice_m = len(primos) - 1
    while indice_i <= indice_m:
        suma = primos[indice_i] + primos[indice_m]
        if suma < numero:
            indice_i += 1
        else:
            if suma == numero:
                yield primos[indice_i], primos[indice_m]
            indice_m -= 1
list(suma_primos(numero))[0]
print(list(suma_primos(numero)))
len(list(suma_primos(numero)))
def golbach(n):
    primos = lista_primos(n)
    sumas = list(suma_primos(n))
    return sumas[0]
golbach(12)
def dgoldbach(n):
    primos = lista_primos(n)
    sumas = list(suma_primos(n))
    return len(sumas)
dgoldbach(24)
def ackermann(m, n):
    if m == 0:
        return n+1
    if n == 0:
        return ackermann(m-1, 1)
    return ackermann(m-1, ackermann(m, n-1))
ackermann(3,2)
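# A sketch (not in the original): memoization plus a larger recursion limit
# helps for small arguments, but ackermann(4, 2) still cannot be computed
# this way (its value has 19729 digits and the recursion depth explodes),
# which motivates the non-recursive approach tried below.
import sys
from functools import lru_cache
sys.setrecursionlimit(100000)

@lru_cache(maxsize=None)
def ackermann_memo(m, n):
    if m == 0:
        return n + 1
    if n == 0:
        return ackermann_memo(m - 1, 1)
    return ackermann_memo(m - 1, ackermann_memo(m, n - 1))

print(ackermann_memo(3, 3))  # 61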
numero = 2
n = 2
for i in range(n+2):
    numero = numero**2
print(numero)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1
Step2: We agree that the decimal position of the tenths digit is indicated by 0.
Step3: 2. Compute the set of natural divisors of the number a. What is the sum of all of them?
Step4: 3. Compute the largest prime smaller than a and the smallest prime larger than a. Also compute the sum of all primes smaller than a.
Step5: So the largest prime < a is
Step6: We will now compute the smallest prime > a. For this we turn to itertools, a core Python library that provides functions for something close to functional programming
Step7: So the next prime greater than a is 937
Step8: We can also write our own factorial
Step10: We can see that it gives the same result
Step11: Exercise 2: A natural number n is perfect if the sum of its divisors other than itself equals n. Find the perfect numbers smaller than 10000.
Step12: The sum of the divisors is (including 1 and omitting the number itself)
Step13: The perfect numbers among the first 1000 natural numbers are
Step14: In total there are
Step15: Exercise 3
Step17: Since checking whether a number of the form $2^{p} -1$ is prime is a very heavy task as p grows, we are going to use the Lucas-Lehmer algorithm.
Step18: In this list we have tuples of the first 258 numbers and whether they are Mersenne primes. Those that qualify are
Step19: So Mersenne was mistaken, and the correct list is the one above.
Step20: we create a list of primes up to 1000+2a
Step21: Solutions
Step22: All the decompositions as a sum of primes
Step23: Number of ways
Step24: 2. Define the function goldbach(n) that returns a pair of primes that sum to n. Define the function goldbachtotal(n) that returns the list of all pairs of primes that sum to n. Define dgoldbach(n) that returns the number of Goldbach decompositions that n admits.
Step25: Exercise 5
Step26: Trying to compute the value of ackermann(4,2) overflows the stack, so another way of solving the problem must be found.
|
4,304
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
image_train = graphlab.SFrame('image_train_data/')
image_test = graphlab.SFrame('image_test_data/')
#deep_learning_model = graphlab.load_model('http://s3.amazonaws.com/GraphLab-Datasets/deeplearning/imagenet_model_iter45')
#image_train['deep_features'] = deep_learning_model.extract_features(image_train)
image_train.head()
knn_model = graphlab.nearest_neighbors.create(image_train,features=['deep_features'],
label='id')
graphlab.canvas.set_target('ipynb')
cat = image_train[18:19]
cat['image'].show()
knn_model.query(cat)
def get_images_from_ids(query_result):
    return image_train.filter_by(query_result['reference_label'], 'id')
cat_neighbors = get_images_from_ids(knn_model.query(cat))
cat_neighbors['image'].show()
car = image_train[8:9]
car['image'].show()
get_images_from_ids(knn_model.query(car))['image'].show()
show_neighbors = lambda i: get_images_from_ids(knn_model.query(image_train[i:i+1]))['image'].show()
show_neighbors(8)
show_neighbors(26)
auto_data = image_train[image_train['label'] == 'automobile']
cat_data = image_train[image_train['label'] == 'cat']
dog_data = image_train[image_train['label'] == 'dog']
bird_data = image_train[image_train['label'] == 'bird']
auto_model = graphlab.nearest_neighbors.create(auto_data,features=['deep_features'],
label='id')
cat_model = graphlab.nearest_neighbors.create(cat_data,features=['deep_features'],
label='id')
dog_model = graphlab.nearest_neighbors.create(dog_data,features=['deep_features'],
label='id')
bird_model = graphlab.nearest_neighbors.create(bird_data,features=['deep_features'],
label='id')
cat = image_test[0:1]
cat_model_query = cat_model.query(cat)
cat_neighbors = get_images_from_ids(cat_model_query)
#cat_neighbors
cat_neighbors['image'].show()
cat_model_query['distance'].mean()
dog_model_query = dog_model.query(cat)
dog_neighbor = get_images_from_ids(dog_model_query)
dog_neighbor['image'].show()
dog_model_query['distance'].mean()
image_test_auto = image_test[image_test['label'] == 'automobile']
image_test_cat = image_test[image_test['label'] == 'cat']
image_test_dog = image_test[image_test['label'] == 'dog']
image_test_bird = image_test[image_test['label'] == 'bird']
dog_cat_neighbors = cat_model.query(image_test_dog, k=1)
dog_dog_neighbors = dog_model.query(image_test_dog, k=1)
dog_bird_neighbors = bird_model.query(image_test_dog, k=1)
dog_auto_neighbors = auto_model.query(image_test_dog, k=1)
dog_distances = graphlab.SFrame()
dog_distances['dog-dog'] = dog_dog_neighbors['distance']
dog_distances['dog-cat'] = dog_cat_neighbors['distance']
dog_distances['dog-bird'] = dog_bird_neighbors['distance']
dog_distances['dog-auto'] = dog_auto_neighbors['distance']
dog_distances
def is_dog_correct(row):
    for col_name in dog_distances.column_names():
        if row['dog-dog'] > row[col_name]:
            return 0
    return 1
num_correct = dog_distances.apply(is_dog_correct)
num_correct.sum()
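# A small extra sketch (assumption: accuracy = correct predictions / all test dogs):
accuracy = num_correct.sum() / float(len(image_test_dog))
print(accuracy)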
cat_model_query
cat_model.query(cat)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the CIFAR-10 dataset
Step2: Computing deep features for our images
Step3: Train a nearest-neighbors model for retrieving images using deep features
Step4: Use image retrieval model with deep features to find similar images
Step5: We are going to create a simple function to view the nearest neighbors to save typing
Step6: Very cool results showing similar cats.
Step7: Just for fun, let's create a lambda to find and show nearest neighbor images
|
4,305
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'test-institute-1', 'sandbox-2', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
4,306
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from numpy import linalg as LA
import itertools
A = np.random.rand(3,3)
M = A @ A.T
D, U = LA.eigh(M) # D is returned as a vector
print(M)
print()
M2 = U @ np.diag(D) @ np.conj(U.T) # Conjugation not always needed
print(M2)
M = np.random.rand(2, 3) + 1j * np.random.rand(2, 3)
U, singular_values, Vh = LA.svd(M)
S = np.zeros((2, 3))
np.fill_diagonal(S, singular_values)
print(M)
print()
M2 = U @ S @ Vh
print(M2)
print(f'singular values: {singular_values}')
print(f'squares of singular values: {singular_values**2}')
print()
Mh = np.conjugate(np.transpose(M))
print('M Mh')
D, U = LA.eigh(M @ Mh)
print(D)
print()
print('Mh M')
D, U = LA.eigh(Mh @ M)
print(D)
def tensorSVD(T, uinds, svd_threshold=1E-16):
    """Computes the SVD of an N-index tensor.

    Args:
      T: Tensor to decompose.
      uinds: List of indices forming the "left" effective index. These indices
        belong to the U tensor, and the rest belong to Vh.
      svd_threshold: Singular values smaller than this are truncated.

    Returns:
      tenU: Left tensor of the SVD.
      tenS: Diagonal matrix of singular values.
      tenV: Right tensor of the SVD.
    """
    NT = len(T.shape)
    Nu = len(uinds)
    dest = range(Nu)  # array 0,1,2,...
    pT = np.moveaxis(T, uinds, dest)
    udims = [pT.shape[n] for n in range(Nu)]
    vdims = [pT.shape[n] for n in range(Nu, NT)]
    uD = np.prod(udims)
    vD = np.prod(vdims)
    rpT = np.reshape(pT, [uD, vD])
    U, S, V = LA.svd(rpT, full_matrices=False)
    # Determine truncation size:
    n_svd = len(S)
    for ix in range(n_svd):
        if S[ix] < svd_threshold:
            n_svd = ix
            break
    # Perform the truncation:
    truncU = U[:, 0:n_svd]
    truncV = V[0:n_svd, :]
    truncS = S[0:n_svd]
    # Restore tensor structure to truncated U, S, V:
    udims.append(n_svd)
    vdims.insert(0, n_svd)
    tenU = np.copy(np.reshape(truncU, udims))
    tenV = np.copy(np.reshape(truncV, vdims))
    tenS = np.diag(truncS)
    return tenU, tenS, tenV
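# A minimal usage sketch (hypothetical tensor, not part of the original):
# split a random 3-index tensor so that index 0 goes to U and indices 1, 2 go to V.
T_demo = np.random.rand(2, 3, 4)
U_demo, S_demo, V_demo = tensorSVD(T_demo, [0])
print(U_demo.shape, S_demo.shape, V_demo.shape)  # typically (2, 2) (2, 2) (2, 3, 4)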
def makeParityIndicator(num_bits):
    """Creates an indicator tensor for the parity dataset. The indicator is a
    tensor with 2**num_bits entries and shape (2,)*num_bits.

    Example:
      psi4 = makeParityIndicator(4)
      psi[0,0,0,0] = 1
      psi[0,0,0,1] = 0

    Args:
      num_bits: Length of the input strings for the indicator.

    Returns:
      psi: Indicator tensor.
    """
    psi = np.zeros((2,)*num_bits)
    for bits in itertools.product([0, 1], repeat=num_bits):
        if sum(bits) % 2 == 0:
            psi[bits] = 1
    #psi += 1E-14*np.random.randn(*psi.shape)
    return psi
# Create indicator for four-bit parity.
psi4 = makeParityIndicator(4)
# Create the MPS tensors via a sequence of SVDs.
U1,S1,V1 = tensorSVD(psi4,[0])
print("V1.shape = ", V1.shape)
M2 = np.tensordot(S1, V1,[1,0])
print("M2.shape = ", M2.shape)
U2,S2,V2 = tensorSVD(M2, [0,1])
print("V2.shape = ", V2.shape)
M3 = np.tensordot(S2, V2, [1,0])
print("M3.shape = ", M3.shape)
U3,S3,V3 = tensorSVD(M3, [0,1])
print("V3.shape = ", V3.shape)
M4 = np.tensordot(S3, V3, [1,0])
print("M4.shape = ", M4.shape)
print("U1.shape = ",U1.shape)
print("U2.shape = ",U2.shape)
print("U3.shape = ",U3.shape)
print("M4.shape = ",M4.shape)
print("These are the non-zero elements")
print(round(np.sqrt(2)*U2[0,0,0]))
print(round(np.sqrt(2)*U2[1,1,0]))
print(round(np.sqrt(2)*U2[1,0,1]))
print(round(np.sqrt(2)*U2[0,1,1]))
print("These elements should be zero")
print(round(np.sqrt(2)*U2[0,0,1]))
print(round(np.sqrt(2)*U2[1,1,1]))
print(round(np.sqrt(2)*U2[1,0,0]))
print(round(np.sqrt(2)*U2[0,1,0]))
# Your code here
d = 2 # index dimensions
A = np.zeros((d,d,d,d,d))
for i in range(d):
    for j in range(d):
        for k in range(d):
            for l in range(d):
                for m in range(d):
                    A[i,j,k,l,m] = np.sqrt(i + 2*j + 3*k + 4*l + 5*m)
print(A.shape)
print(A)
# Your code here
chi = 2 # Set bond dimension for truncations.
# (a) form left density matrix and isometry
rho_L = A.reshape(d**2,d**3) @ A.reshape(d**2,d**3).T
D_L,U_L = LA.eigh(rho_L)
W_L = (U_L[:,(d**2-chi):]).reshape(d,d,chi)
# (b) form right density matrix and isometry
rho_R = A.reshape(d**3,d**2).T @ A.reshape(d**3,d**2)
D_R,U_R = LA.eigh(rho_R)
W_R = (U_R[:,(d**2-chi):]).reshape(d,d,chi)
# (c) form B tensor
B = np.einsum('ijklm,ijn,lmp->nkp',A,W_L,W_R)
print(B)
# Your code here
# check truncation error
A_recover = np.einsum('fkg,ijf,lmg->ijklm',B,W_L,W_R)
err_tot = LA.norm(A-A_recover) / LA.norm(A)
print(err_tot)
chi = 2 # set bond dimension for truncations
# define image data
image_data = np.array([[0,1,1,0,1],[1,1,0,0,1],[1,0,1,0,1],
[1,1,0,1,0],[0,1,1,0,1],[1,0,1,0,1]])
n_samples = image_data.shape[0]
n_pixels = image_data.shape[1]
# Your code here
###############################################################
def images_to_sparse(images):
    """Creates a sparse tensor representation of image data (one-hot encoding).

    Args:
      images: Array of shape (M,N). M is the number of samples, N is the number
        of pixels.

    Returns:
      pixel_tensors: list of length N, where pixel_tensors[k] is an array of
        shape (P,M) representing the one-hot encoding of the kth pixels.
    """
    pixel_tensors = []
    for pixel_loc in range(images.shape[1]):
        pixel_tensors.append(np.array([(1 - images[:, pixel_loc]), images[:, pixel_loc]]))
    return pixel_tensors
###############################################################
def compute_scalar_products(pixel_tensors):
    """Creates the matrix of scalar products from a list of pixel_tensors.

    Args:
      pixel_tensors: list of tensors, where pixel_tensors[k] is an array of
        shape (P,M) representing the one-hot encoding of the kth pixels.

    Returns:
      scalar_matrix: array of shape (M,M), formed from the product over pixels k
        of the outer product of pixel_tensors[k] with itself.
    """
    M = pixel_tensors[1].shape[1]
    scalar_matrix = np.ones((M, M))
    for pixel in pixel_tensors:
        scalar_matrix = scalar_matrix * (pixel.T @ pixel)
    return scalar_matrix
###############################################################
def sparse_to_dense(pixel_tensors):
    """Creates a dense tensor from a list of pixel_tensors.

    Args:
      pixel_tensors: list of N tensors, where pixel_tensors[k] is an array of
        shape (P,M) representing the one-hot encoding of the kth pixels.

    Returns:
      dense_tensor: array with N dimensions, where the dim of the kth index is
        equal to the dim P of the kth tensor.
    """
    dense_tensor = pixel_tensors[0]
    dims_P = [dense_tensor.shape[0]]
    for pixel in pixel_tensors[1:]:
        dim_temp = pixel.shape[0]
        dense_tensor = np.kron(dense_tensor, np.ones((dim_temp, 1))) * np.kron(np.ones((np.prod(dims_P), 1)), pixel)
        dims_P.append(dim_temp)
    return np.sum(dense_tensor, 1).reshape(dims_P)
###############################################################
# export image data to sparse tensor format
V = images_to_sparse(image_data)
# (a) compute left density matrix
gamma_L = compute_scalar_products(V[2:4])
V_L = np.einsum('im,jm->ijm',V[0],V[1])
rho_L = V_L.reshape((4,n_samples)) @ gamma_L @ (V_L.reshape((4,n_samples))).T
# compute left isometry
D_L, U_L = LA.eigh(rho_L)
W_L = U_L[:,(4-chi):].reshape(2,2,chi)
# (b) compute right density matrix
gamma_R = compute_scalar_products(V[0:2])
V_R = np.einsum('im,jm->ijm',V[3],V[4])
rho_R = V_R.reshape((4,n_samples)) @ gamma_R @ (V_R.reshape((4,n_samples))).T
# compute right isometry
D_R, U_R = LA.eigh(rho_R)
W_R = U_R[:,(4-chi):].reshape(2,2,chi)
# (c) coarse-grain image data using isometries to form B tensor
V_Ltemp = np.einsum('ijk,ijl->kl',W_L,V_L)
V_Rtemp = np.einsum('ijk,ijl->kl',W_R,V_R)
B = sparse_to_dense([V_Ltemp,V[2],V_Rtemp])
# Your code here
# check accuracy
A_recover = np.einsum('fkg,ijf,lmg->ijklm',B,W_L,W_R)
A_initial = sparse_to_dense(V)
err_tot = LA.norm(A_recover-A_initial)
print(err_tot)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 1
Step2: Singular value decomposition
Step5: MPS
Step6: We can compress the parity indicator into an MPS form by a sequence of SVDs, as explained in the lecture. The following code block
Step7: In this scheme, the four MPS tensors consist of each of the $U$ tensors from the SVDs for all but the final tensor. We can see their shapes here
Step8: We can interpret the function of these MPS tensors by looking at which entries are nonzero. In particular, for $U2$ we have the following
Step9: Part 2
Step10: Solution (i)
Step11: (ii)
Step12: Solution (ii)
Step13: (iii)
Step14: Solution (iii)
Step15: Problem 2
Step19: Solution (i)
Step20: (ii)
Step21: Solution (ii)
|
4,307
|
<ASSISTANT_TASK:>
Python Code:
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
import rx
rx
from rx import Observable, Observer
help(Observer)
dir(Observer)
help(Observable)
class MyObserver(Observer):
    def on_next(self, x):
        print("Got: %s" % x)

    def on_error(self, e):
        print("Got ERROR: %s" % e)

    def on_completed(self):
        print("Sequence completed")
subject = Observable.from_iterable(range(10))
subject
observer = MyObserver()
observer
subject.subscribe(observer)
def interactiveObservable(count):
    print("-" * 30)
    sequence = range(count)
    subject = Observable.from_iterable(sequence)
    observer = MyObserver()
    subject.subscribe(observer)
countSlider = widgets.IntSlider(min=2, max=10, step=1, value=3)
interact(interactiveObservable, count=countSlider)
from functools import partial
def interactiveSequenceObservable(observer, count):
    print("-" * 30)
    sequence = range(count)
    subject = Observable.from_iterable(sequence)
    subject.subscribe(observer)
countSlider = widgets.IntSlider(min=2, max=10, step=1, value=3)
callback = partial(interactiveSequenceObservable, MyObserver())
callback.__name__ = interactiveSequenceObservable.__name__
interact(callback, count=countSlider)
countSlider = widgets.IntSlider(min=2, max=10, step=1, value=3)
callback = partial(interactiveSequenceObservable, print)
callback.__name__ = interactiveSequenceObservable.__name__
interact(callback, count=countSlider)
subject = Observable.from_iterable(range(10))
disposable = subject.filter(lambda x : x % 2).subscribe(print)
disposable
def interactiveFilter(observer, count, divisor):
    sequence = range(1, count+1)
    subject = Observable.from_iterable(sequence)
    filtered = subject.filter(lambda x: x % divisor)
    filtered.subscribe(observer)
callback = partial(interactiveFilter, print)
callback.__name__ = interactiveFilter.__name__
countSlider = widgets.IntSlider(min=2, max=50, value=10)
divisorSlider = widgets.IntSlider(min=2, max=10, value=2)
interact(callback, count=countSlider, divisor=divisorSlider)
subject = Observable.from_iterable(range(10))
disposable = subject.map(lambda x : x * 2).subscribe(print)
disposable
def interactiveTransformer(observer, count, code, debug=False):
    sequence = range(1, count+1)
    if debug:
        print(sequence, tuple(sequence))
    subject = Observable.from_iterable(sequence)
    callback = eval(code)
    filtered = subject.map(callback)
    filtered.subscribe(observer)
callback = partial(interactiveTransformer, print)
callback.__name__ = interactiveTransformer.__name__
countSlider = widgets.IntSlider(min=2, max=50, value=10)
codeText = widgets.Text(value="lambda x: x % 2")
debugCheckBox = widgets.Checkbox(value=False)
interact(callback, count=countSlider, code=codeText, debug=debugCheckBox)
codeText = widgets.Text(value="lambda x: x % 2")
interact(callback, count=countSlider, code=codeText, debug=debugCheckBox)
xs = Observable.from_iterable(range(1, 6))
ys = Observable.from_iterable('abcde')
xsMys = xs.merge(ys)
xsMys.subscribe(print)
from rx.testing import marbles
xs = Observable.from_marbles('a-b-c-|')
xs.to_blocking().to_marbles()
xs = Observable.from_marbles('a-b-c-x-e')
ys = Observable.from_marbles('1-2-3-4-5')
xs.merge(ys).to_blocking().to_marbles()
xs = Observable.from_marbles('1-2-3-x-4')
ys = Observable.from_marbles('1-2-3-4-5')
xs.merge(ys).to_blocking().to_marbles()
def interactiveMarbles(stream1, stream2):
    xs = Observable.from_marbles(stream1)
    ys = Observable.from_marbles(stream2)
    print(xs.merge(ys).to_blocking().to_marbles())
stream1Marble = widgets.Text(value="1-2-3-4-5")
stream2Marble = widgets.Text(value="a-b-c-d-e")
interact(interactiveMarbles, stream1=stream1Marble, stream2=stream2Marble)
result = eval('lambda x: x * 2')
result
tuple(map(result, (1, 2, 3, 4)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Concepts
Step2: Concepts
Step3: Reference
Step4: Filtering a Sequence
Step5: Interactive Rx - Filtering
Step6: Transforming a Sequence
Step7: Interactive Rx - Transformations
Step8: Merge
Step9: Space-Time of rx
Step10: Note
Step11: Reference
|
4,308
|
<ASSISTANT_TASK:>
Python Code:
markdown_str = ("The tables in this section show the standard association metrics between "
"*observed* human scores and different types of machine scores. "
"These results are computed on the evaluation set. `raw_trim` scores "
"are truncated to [{}, {}]. `raw_trim_round` scores are computed by first truncating "
"and then rounding the predicted score. Scaled scores are computed by re-scaling "
"the predicted scores using mean and standard deviation of human scores as observed "
"on the training data and mean and standard deviation of machine scores as predicted "
"for the training set.".format(min_score, max_score))
display(Markdown(markdown_str))
raw_or_scaled = "scaled" if use_scaled_predictions else "raw"
eval_file = join(output_dir, '{}_eval.{}'.format(experiment_id, file_format))
df_eval = DataReader.read_from_file(eval_file, index_col=0)
distribution_columns = ['N', 'h_mean', 'sys_mean', 'h_sd', 'sys_sd', 'h_min', 'sys_min', 'h_max', 'sys_max', 'SMD']
association_columns = ['N'] + [column for column in df_eval.columns if not column in distribution_columns]
df_distribution = df_eval[distribution_columns]
df_association = df_eval[association_columns]
pd.options.display.width=10
formatter = partial(color_highlighter, low=-0.15, high=0.15)
HTML('<span style="font-size:95%">'+ df_distribution.to_html(classes=['sortable'],
escape=False,
formatters={'SMD': formatter},
float_format=float_format_func) + '</span>')
markdown_str = ['The table shows the standard association metrics between human scores and machine scores.']
if continuous_human_score:
    markdown_str.append("Note that for computation of `kappa` both human and machine scores are rounded.")
else:
    markdown_str.append("Note that for computation of `kappa` all machine scores are rounded.")
Markdown('\n'.join(markdown_str))
pd.options.display.width=10
HTML('<span style="font-size:95%">'+ df_association.to_html(classes=['sortable'],
escape=False,
float_format=float_format_func) + '</span>')
markdown_str = ["Confusion matrix using {}, trimmed, and rounded scores and human scores (rows=system, columns=human).".format(raw_or_scaled)]
if continuous_human_score:
    markdown_str.append("Note: Human scores have been rounded to the nearest integer.")
Markdown('\n'.join(markdown_str))
confmat_file = join(output_dir, '{}_confMatrix.{}'.format(experiment_id, file_format))
df_confmat = DataReader.read_from_file(confmat_file, index_col=0)
df_confmat
markdown_strs = ["The histogram and the table below show the distibution of "
"human scores and {}, trimmed, and rounded machine scores "
"(as % of all responses).".format(raw_or_scaled)]
markdown_strs.append("Differences in the table between human and machine distributions "
"larger than 5 percentage points are <span class='highlight_color'>highlighted</span>.")
if continuous_human_score:
markdown_strs.append("Note: Human scores have beeen rounded to the nearest integer.")
display(Markdown('\n'.join(markdown_strs)))
scoredist_file = join(output_dir, '{}_score_dist.{}'.format(experiment_id, file_format))
df_scoredist = DataReader.read_from_file(scoredist_file, index_col=0)
df_scoredist_melted = pd.melt(df_scoredist, id_vars=['score'])
df_scoredist_melted = df_scoredist_melted[df_scoredist_melted['variable'] != 'difference']
# get the colors for the plot
colors = sns.color_palette("Greys", 2)
with sns.axes_style('whitegrid'):
    # make a barplot without a legend since we will
    # add one manually later
    p = sns.catplot(x="score", y="value", hue="variable", kind="bar",
                    palette=colors, data=df_scoredist_melted,
                    height=3, aspect=2, legend=False)
    p.set_axis_labels('score', '% of responses')
    # add a legend with the right colors
    axis = p.axes[0][0]
    legend = axis.legend(labels=('Human', 'Machine'), title='', frameon=True, fancybox=True)
    legend.legendHandles[0].set_color(colors[0])
    legend.legendHandles[1].set_color(colors[1])
imgfile = join(figure_dir, '{}_score_dist.svg'.format(experiment_id))
plt.savefig(imgfile)
if use_thumbnails:
    show_thumbnail(imgfile, next(id_generator))
else:
    plt.show()
formatter = partial(color_highlighter, low=0, high=5, absolute=True)
df_html = df_scoredist.to_html(classes=['sortable'], index=False,
escape=False, formatters={'difference': formatter})
display(HTML(df_html))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Descriptive holistic score statistics
Step2: Association statistics
Step3: Confusion matrix
Step4: Distribution of human and machine scores
|
4,309
|
<ASSISTANT_TASK:>
Python Code:
df = pd.DataFrame({'subject':['A', 'A', 'B', 'B'],
'treatment':['CH', 'DT', 'CH', 'DT'],
'concentration':range(4)},
columns=['subject', 'treatment', 'concentration'])
df
pivoted = df.pivot(index='subject', columns='treatment', values='concentration')
pivoted
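# If a (subject, treatment) pair occurred more than once, pivot would raise an
# error; pivot_table aggregates duplicates instead ('mean' by default).
# A small sketch with hypothetical duplicated rows:
df_dup = pd.concat([df, df], ignore_index=True)
df_dup.pivot_table(index='subject', columns='treatment', values='concentration')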
from sqlalchemy import create_engine
engine = create_engine('sqlite:///data/survey.db')
visited = pd.read_sql('Visited', engine, index_col='ident', parse_dates=['dated'])
visited
readings = pd.read_sql('Survey', engine).dropna()
readings = readings.drop_duplicates()
readings
multi = df.set_index(['subject', 'treatment'])
multi
multi.loc['A'] # first level only
multi.loc[('A', 'CH')] # two level
multi.loc[(slice(None), 'CH'), :]
multi.reset_index()
result = multi['concentration'].unstack()
result
result.stack()
df = multi.stack()
df
!head -1 ./data/BETR8010000800100hour.1-1-1990.31-12-2012
data = pd.read_csv("data/BETR8010000800100hour.1-1-1990.31-12-2012", sep='\t')#, header=None)
data.head()
hours = map(str, range(24))
flags = ['flag'] * 24
col_names = ['date'] + list(sum(zip(hours, flags), ()))
col_names[:5]
data = pd.read_csv("data/BETR8010000800100hour.1-1-1990.31-12-2012", sep='\t',
na_values=['-999', '-9999'],
names=col_names,
index_col='date')#, header=None)
data = data.drop('flag', axis=1)
data.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A better representation might be one where the different subjects are in rows, the applied treatments are in columns and outcomes are in the data frame values.
Step2: If there is more than one record for each pair of "subject" and "treatment" (for example, the subject was tested twice with the same treatment at different times), you can use pivot_table. It works just like pivot, but it additionally lets you specify an aggregation function ('mean' by default).
Step3: <div class="alert alert-success">
Step4: Note how the two indexes are nested
Step5: Note that it creates a standard data frame with "flat" index.
Step6: Indexing on the second index only may be slightly involved
Step7: Consult the documentation for other methods.
Step8: <div class="alert alert-success">
Step9: unstack reverses the operation
Step10: We can "stack" it even further
Step11: <div class="alert alert-success">
Step12: Just reading the tab-delimited data
Step13: The above data is clearly not ready to be used! Each row contains the 24 measurements for each hour of the day, and also contains a flag (0/1) indicating the quality of the data.
Step14: For now, we disregard the 'flag' columns
|
4,310
|
<ASSISTANT_TASK:>
Python Code:
from nilearn.plotting import plot_anat
%matplotlib inline
plot_anat('/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz', dim=-1);
%%bash
bet /data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz \
/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w_bet.nii.gz
plot_anat('/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w_bet.nii.gz', dim=-1);
from nipype.interfaces.fsl import BET
skullstrip = BET(in_file='/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz')
res = skullstrip.run()
print(res.outputs.out_file)
plot_anat(res.outputs.out_file, dim=-1);
from nipype.interfaces.base import CommandLine
CommandLine.help()
nipype_ls = CommandLine('ls', args='-lh', terminal_output='allatonce')
result = nipype_ls.run()
print(result.runtime.stdout)
%%bash
antsTransformInfo /home/neuro/nipype_tutorial/notebooks/scripts/transform.tfm
class TransformInfo(CommandLine):
    _cmd = 'antsTransformInfo'
TransformInfo.help()
from nipype.interfaces.base import CommandLineInputSpec, File
class TransformInfoInputSpec(CommandLineInputSpec):
    in_file = File(exists=True, mandatory=True, argstr='%s',
                   position=0, desc='the input transform file')

class TransformInfo(CommandLine):
    _cmd = 'antsTransformInfo'
    input_spec = TransformInfoInputSpec
TransformInfo.help()
my_info_interface = TransformInfo(in_file='/home/neuro/nipype_tutorial/notebooks/scripts/transform.tfm')
print(my_info_interface.cmdline)
try:
    TransformInfo().cmdline
except(ValueError) as err:
    print('It crashed with...')
    print("ValueError:", err)
else:
    raise
try:
    my_info_interface.inputs.in_file = 'idontexist.tfm'
except(Exception) as err:
    print('It crashed with...')
    print("TraitError:", err)
else:
    raise
from nipype.interfaces.base import TraitedSpec, traits
class TransformInfoOutputSpec(TraitedSpec):
    translation = traits.List(traits.Float, desc='the translation component of the input transform')

class TransformInfo(CommandLine):
    _cmd = 'antsTransformInfo'
    input_spec = TransformInfoInputSpec
    output_spec = TransformInfoOutputSpec
TransformInfo.help()
my_info_interface = TransformInfo(in_file='/home/neuro/nipype_tutorial/notebooks/scripts/transform.tfm',
terminal_output='allatonce')
result = my_info_interface.run()
print(result.runtime.stdout)
class TransformInfo(CommandLine):
    _cmd = 'antsTransformInfo'
    input_spec = TransformInfoInputSpec
    output_spec = TransformInfoOutputSpec

    def _run_interface(self, runtime):
        import re
        # Run the command line as a natural CommandLine interface
        runtime = super(TransformInfo, self)._run_interface(runtime)
        # Search for the transform in the standard output
        expr_tra = re.compile('Translation:\s+\[(?P<translation>[0-9\.-]+,\s[0-9\.-]+,\s[0-9\.-]+)\]')
        trans = [float(v) for v in expr_tra.search(runtime.stdout).group('translation').split(', ')]
        # Save it for later use in _list_outputs
        setattr(self, '_result', trans)
        # Good to go
        return runtime

    def _list_outputs(self):
        # Get the attribute saved during _run_interface
        return {'translation': getattr(self, '_result')}
my_info_interface = TransformInfo(in_file='/home/neuro/nipype_tutorial/notebooks/scripts/transform.tfm',
terminal_output='allatonce')
result = my_info_interface.run()
result.outputs.translation
from nipype.interfaces.base import (CommandLine, CommandLineInputSpec,
                                    TraitedSpec, traits, File)

class TransformInfoInputSpec(CommandLineInputSpec):
    in_file = File(exists=True, mandatory=True, argstr='%s', position=0,
                   desc='the input transform file')

class TransformInfoOutputSpec(TraitedSpec):
    translation = traits.List(traits.Float, desc='the translation component of the input transform')

class TransformInfo(CommandLine):
    _cmd = 'antsTransformInfo'
    input_spec = TransformInfoInputSpec
    output_spec = TransformInfoOutputSpec

    def _run_interface(self, runtime):
        import re
        # Run the command line as a natural CommandLine interface
        runtime = super(TransformInfo, self)._run_interface(runtime)
        # Search for the transform in the standard output
        expr_tra = re.compile('Translation:\s+\[(?P<translation>[0-9\.-]+,\s[0-9\.-]+,\s[0-9\.-]+)\]')
        trans = [float(v) for v in expr_tra.search(runtime.stdout).group('translation').split(', ')]
        # Save it for later use in _list_outputs
        setattr(self, '_result', trans)
        # Good to go
        return runtime

    def _list_outputs(self):
        # Get the attribute saved during _run_interface
        return {'translation': getattr(self, '_result')}
my_info_interface = TransformInfo(in_file='/home/neuro/nipype_tutorial/notebooks/scripts/transform.tfm',
terminal_output='allatonce')
result = my_info_interface.run()
result.outputs.translation
from nipype.interfaces.base import CommandLineInputSpec, File, TraitedSpec

class CustomBETInputSpec(CommandLineInputSpec):
    in_file = File(exists=True, mandatory=True, argstr='%s', position=0, desc='the input image')
    mask = traits.Bool(mandatory=False, argstr='-m', position=2, desc='create binary mask image')
    # Do not set exists=True for output files!
    out_file = File(mandatory=True, argstr='%s', position=1, desc='the output image')

class CustomBETOutputSpec(TraitedSpec):
    out_file = File(desc='the output image')
    mask_file = File(desc="path/name of binary brain mask (if generated)")

class CustomBET(CommandLine):
    _cmd = 'bet'
    input_spec = CustomBETInputSpec
    output_spec = CustomBETOutputSpec

    def _list_outputs(self):
        # Derive the output filenames from the inputs
        return {'out_file': self.inputs.out_file,
                'mask_file': self.inputs.out_file.replace('brain', 'brain_mask')}
my_custom_bet = CustomBET()
my_custom_bet.inputs.in_file = '/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz'
my_custom_bet.inputs.out_file = 'sub-01_T1w_brain.nii.gz'
my_custom_bet.inputs.mask = True
result = my_custom_bet.run()
plot_anat(result.outputs.out_file, dim=-1);
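# A small extra check (assumption: with mask=True, bet writes the binary mask
# next to the output image, and _list_outputs exposes it as mask_file):
print(result.outputs.mask_file)
plot_anat(result.outputs.mask_file, dim=-1);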
def translate_image(img, translation, out_file):
    import nibabel as nb
    import numpy as np
    from scipy.ndimage.interpolation import affine_transform
    # Load the data
    nii = nb.load(img)
    data = nii.get_data()
    # Create the transformation matrix
    matrix = np.eye(3)
    trans = (np.array(translation) / nii.header.get_zooms()[:3]) * np.array([1.0, -1.0, -1.0])
    # Apply the transformation matrix
    newdata = affine_transform(data, matrix=matrix, offset=trans)
    # Save the new data in a new NIfTI image
    nb.Nifti1Image(newdata, nii.affine, nii.header).to_filename(out_file)
    print('Translated file now is here: %s' % out_file)
orig_image = '/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz'
translation = [20.0, -20.0, -20.0]
translated_image = 'translated.nii.gz'
# Let's run the translate_image function on our inputs
translate_image(orig_image,
translation,
translated_image)
plot_anat(orig_image, dim=-1);
plot_anat('translated.nii.gz', dim=-1);
from nipype.interfaces.utility import Function
my_python_interface = Function(
input_names=['img', 'translation', 'out_file'],
output_names=['out_file'],
function=translate_image
)
# Set inputs
my_python_interface.inputs.img = '/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz'
my_python_interface.inputs.translation = [-35.0, 35.0, 35.0]
my_python_interface.inputs.out_file = 'translated_functioninterface.nii.gz'
# Run the interface
result = my_python_interface.run()
# Plot the result
plot_anat('translated_functioninterface.nii.gz', dim=-1);
from nipype.interfaces.base import BaseInterfaceInputSpec, File, TraitedSpec

class TranslateImageInputSpec(BaseInterfaceInputSpec):
    in_file = File(exists=True, mandatory=True, desc='the input image')
    out_file = File(mandatory=True, desc='the output image')  # Do not set exists=True !!
    translation = traits.List([50.0, 0.0, 0.0], traits.Float, usedefault=True,
                              desc='the translation component of the input transform')

class TranslateImageOutputSpec(TraitedSpec):
    out_file = File(desc='the output image')
from nipype.interfaces.base import BaseInterface
class TranslateImage(BaseInterface):
input_spec = TranslateImageInputSpec
output_spec = TranslateImageOutputSpec
TranslateImage.help()
will_fail_at_run = TranslateImage(
in_file='/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz',
out_file='translated.nii.gz')
try:
result = will_fail_at_run.run()
except(NotImplementedError) as err:
print('It crashed with...')
print("NotImplementedError:", err)
else:
raise
class TranslateImage(BaseInterface):
input_spec = TranslateImageInputSpec
output_spec = TranslateImageOutputSpec
def _run_interface(self, runtime):
# Call our python code here:
translate_image(
self.inputs.in_file,
self.inputs.translation,
self.inputs.out_file
)
# And we are done
return runtime
half_works = TranslateImage(
in_file='/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz',
out_file='translated_nipype.nii.gz')
try:
result = half_works.run()
except(NotImplementedError) as err:
print('It crashed with...')
print("NotImplementedError:", err)
else:
raise
from nipype.interfaces.base import BaseInterfaceInputSpec, BaseInterface, File, TraitedSpec, traits
class TranslateImageInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc='the input image')
out_file = File(mandatory=True, desc='the output image') # Do not set exists=True !!
translation = traits.List([50.0, 0.0, 0.0], traits.Float, usedefault=True,
desc='the translation component of the input transform')
class TranslateImageOutputSpec(TraitedSpec):
out_file = File(desc='the output image')
class TranslateImage(BaseInterface):
input_spec = TranslateImageInputSpec
output_spec = TranslateImageOutputSpec
def _run_interface(self, runtime):
# Call our python code here:
translate_image(
self.inputs.in_file,
self.inputs.translation,
self.inputs.out_file
)
# And we are done
return runtime
def _list_outputs(self):
return {'out_file': self.inputs.out_file}
this_works = TranslateImage(
in_file='/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz',
out_file='translated_nipype.nii.gz')
result = this_works.run()
plot_anat(result.outputs.out_file, dim=-1);
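# A quick numerical spot check (sketch): the translated image keeps the
# original shape; only the content is shifted.
import nibabel as nb
orig_nii = nb.load('/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz')
moved_nii = nb.load(result.outputs.out_file)
print(orig_nii.shape == moved_nii.shape) # True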
from nipype.interfaces.base import (CommandLine, traits, TraitedSpec,
BaseInterface, BaseInterfaceInputSpec, File)
class BrainVolumeMATLABInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True)
script_file = File(exists=True, mandatory=True)
class BrainVolumeMATLABOutputSpec(TraitedSpec):
volume = traits.Int(desc='brain volume')
class BrainVolumeMATLAB(BaseInterface):
input_spec = BrainVolumeMATLABInputSpec
output_spec = BrainVolumeMATLABOutputSpec
# Specify the interface inputs
in_file = '/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz'
script_file = '/home/neuro/nipype_tutorial/notebooks/scripts/brainvolume.m'
!cat scripts/brainvolume.m
import re
import nibabel as nb
from scipy.io import savemat
# 1. save the image in matlab format as tmp_image.mat
tmp_image = 'tmp_image.mat'
data = nb.load(in_file).get_data()
savemat(tmp_image, {'data': data}, do_compression=False)
# 2. load the script
with open(script_file) as script:
script_content = script.read()
# 3. replace the input_image.mat file with the actual input of this interface
with open('newscript.m', 'w') as script:
script.write(script_content.replace('input_image.mat', 'tmp_image.mat'))
# 4. run the matlab script
mlab = CommandLine('octave', args='newscript.m', terminal_output='stream')
result = mlab.run()
# 5. extract the volume estimation from the output
expr_tra = re.compile(r'total =\s+(?P<total>[0-9]+)')
volume = int(expr_tra.search(result.runtime.stdout).groupdict()['total'])
print(volume)
from nipype.interfaces.base import (CommandLine, traits, TraitedSpec,
BaseInterface, BaseInterfaceInputSpec, File)
import re
import nibabel as nb
from scipy.io import savemat
class BrainVolumeMATLABInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True)
script_file = File(exists=True, mandatory=True)
class BrainVolumeMATLABOutputSpec(TraitedSpec):
volume = traits.Int(desc='brain volume')
class BrainVolumeMATLAB(BaseInterface):
input_spec = BrainVolumeMATLABInputSpec
output_spec = BrainVolumeMATLABOutputSpec
def _run_interface(self, runtime):
# Save the image in matlab format as tmp_image.mat
tmp_image = 'tmp_image.mat'
data = nb.load(self.inputs.in_file).get_data()
savemat(tmp_image, {'data': data}, do_compression=False)
# Load the script
with open(self.inputs.script_file) as script:
script_content = script.read()
# Replace the input_image.mat file with the actual input of this interface
with open('newscript.m', 'w') as script:
script.write(script_content.replace('input_image.mat', 'tmp_image.mat'))
# Run the matlab script with octave
mlab = CommandLine('octave', args='newscript.m', terminal_output='stream')
result = mlab.run()
expr_tra = re.compile(r'total =\s+(?P<total>[0-9]+)')
volume = int(expr_tra.search(result.runtime.stdout).groupdict()['total'])
setattr(self, '_result', volume)
return result.runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['volume'] = getattr(self, '_result')
return outputs
matlab = BrainVolumeMATLAB(in_file='/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz',
script_file='/home/neuro/nipype_tutorial/notebooks/scripts/brainvolume.m')
result = matlab.run()
print(result.outputs)
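# The script reports `total` as a plain number; assuming it is a voxel count
# (we only see the parsed stdout, so this is an illustrative sketch),
# converting it to a physical volume uses the voxel sizes from the header:
import nibabel as nb
import numpy as np
voxel_ml = np.prod(nb.load(matlab.inputs.in_file).header.get_zooms()[:3]) / 1000.0
print('%.1f ml' % (result.outputs.volume * voxel_ml))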
# Write your solution here
from nipype.interfaces.base import (CommandLine, traits, TraitedSpec,
BaseInterface, BaseInterfaceInputSpec, File)
import os
import re
import nibabel as nb
from scipy.io import savemat
class BrainVolumeMATLABInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True)
script_file = File(exists=True, mandatory=True)
class BrainVolumeMATLABOutputSpec(TraitedSpec):
volume = traits.Int(desc='brain volume')
out_file = File(desc='output file containing total brain volume') # This line was added
class BrainVolumeMATLAB(BaseInterface):
input_spec = BrainVolumeMATLABInputSpec
output_spec = BrainVolumeMATLABOutputSpec
def _run_interface(self, runtime):
# Save the image in matlab format as tmp_image.mat
tmp_image = 'tmp_image.mat'
data = nb.load(self.inputs.in_file).get_data()
savemat(tmp_image, {'data': data}, do_compression=False)
# Load the script
with open(self.inputs.script_file) as script:
script_content = script.read()
# Replace the input_image.mat file with the actual input of this interface
with open('newscript.m', 'w') as script:
script.write(script_content.replace('input_image.mat', 'tmp_image.mat'))
# Run the matlab script with octave
mlab = CommandLine('octave', args='newscript.m', terminal_output='stream')
result = mlab.run()
expr_tra = re.compile(r'total =\s+(?P<total>[0-9]+)')
volume = int(expr_tra.search(result.runtime.stdout).groupdict()['total'])
setattr(self, '_result', volume)
# Write the total brain volume into a text file
out_fname = os.path.abspath('volume.txt')
setattr(self, '_out_file', out_fname)
with open(out_fname, 'w') as out_file:
out_file.write('%d' % volume)
return result.runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['volume'] = getattr(self, '_result')
outputs['out_file'] = getattr(self, '_out_file')
return outputs
matlab = BrainVolumeMATLAB(in_file='/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz',
script_file='/home/neuro/nipype_tutorial/notebooks/scripts/brainvolume.m')
result = matlab.run()
print(result.outputs.out_file)
!cat volume.txt
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example of interface
Step2: ... which yields the following
Step3: Using nipype, the equivalent is a bit more verbose
Step4: Now we can verify that the result is exactly the same as before. Please note that, since we are using a Python environment, we use the result of the execution to point our plot_anat function to the output image of running BET
Step5: What are the main parts of a Nipype interface?
Step6: As a quick example, let's wrap bash's ls with Nipype
Step7: Now we have a Python object nipype_ls that is a runnable nipype interface. After execution, the Nipype interface returns a result object. We can retrieve the output of our ls invocation from the result.runtime property
Step8: Create your own CommandLine interface
Step9: So let's plan our implementation
Step10: This is enough to have a nipype compatible interface for this tool
Step11: Specifying the inputs
Step12: Some settings are done for this File object
Step13: Our interface now has one mandatory input, and inherits some optional inputs from the CommandLineInputSpec
Step14: One interesting feature of the Nipype interface is that the underlying command line can be checked using the object property cmdline. The command line can only be built when the mandatory inputs are set, so let's instantiate our new Interface for the first time, and check the underlying command line
Step15: Nipype will make sure that the parameters fulfill their prescribed attributes. For instance, in_file is mandatory. An error is issued if we build the command line or try to run this interface without it
Step16: It will also complain if we try to set a non-existent file
Step17: Specifying the outputs
Step18: And now, our new output is in place
Step19: We are almost there - final needs
Step20: We need to complete the functionality of the run() member of our interface to parse the standard output. This is done extending its _run_interface() member.
Step21: Let's run this interface (we set terminal_output='allatonce' to reduce the length of this manual; the default would otherwise be 'stream')
Step22: Now we can retrieve our outcome of interest as an output
Step23: Summary of a CommandLine interface
Step24: Wrapping up - fast use case for simple CommandLine wrapper
Step25: Create your own Python interface
Step26: Let's see how this function operates
Step27: Now that the function was executed, let's plot the original and the translated image.
Step28: Perfect, we see that the translation was applied.
Step29: The arguments of translate_image should ideally be listed in the same order and with the same names as in the signature of the function. The same should be the case for the outputs. Finally, the Function interface takes a function input that points to your python code.
Step30: Complete approach - pure Python interface
Step31: Similarly to the change of base class for the input specification, the core of our new interface will derive from BaseInterface instead of CommandLine
Step32: At this point, we have defined a pure python interface but it is unable to do anything because we didn't implement a _run_interface() method yet.
Step33: What happens if we try to run such an interface without specifying the _run_interface() function?
Step34: So, let's implement the missing part. As we would imagine, this needs to be very similar to what we did before with the TransformInfo interface
Step35: If we run it now, our interface will get further
Step36: ... but it still crashes because we haven't specified any _list_outputs() method. I.e. our python function is called, but the interface crashes when execution arrives at retrieving the outputs.
Step37: Now, we have everything together. So let's run it and visualize the output file.
Step38: Create your own MATLAB interface
Step39: Step by step implementation
Step40: Putting it all together
Step41: Let's test it
Step42: We see in the example above that everything works fine. But now, let's say that we want to save the total brain volume to a file and give the location of this file back as an output. How would you do that?
Step43: Now, let's test if it works.
Step44: No errors, perfect. Did we get the right file?
Step45: And what about the content of this file?
|
4,311
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install -U google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
import time
import google.cloud.aiplatform_v1beta1 as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
# Tabular Dataset type
DATA_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/metadata/tables_1.0.0.yaml"
# Tabular Labeling type
LABEL_SCHEMA = (
"gs://google-cloud-aiplatform/schema/dataset/ioformat/table_io_format_1.0.0.yaml"
)
# Tabular Training task
TRAINING_SCHEMA = "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_tables_1.0.0.yaml"
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_dataset_client():
client = aip.DatasetServiceClient(client_options=client_options)
return client
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_pipeline_client():
client = aip.PipelineServiceClient(client_options=client_options)
return client
def create_endpoint_client():
client = aip.EndpointServiceClient(client_options=client_options)
return client
def create_prediction_client():
client = aip.PredictionServiceClient(client_options=client_options)
return client
clients = {}
clients["dataset"] = create_dataset_client()
clients["model"] = create_model_client()
clients["pipeline"] = create_pipeline_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
for client in clients.items():
print(client)
IMPORT_FILE = "gs://cloud-samples-data/tables/iris_1000.csv"
count = ! gsutil cat $IMPORT_FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $IMPORT_FILE | head
heading = ! gsutil cat $IMPORT_FILE | head -n1
label_column = str(heading).split(",")[-1].split("'")[0]
print("Label Column Name", label_column)
if label_column is None:
raise Exception("label column missing")
TIMEOUT = 90
def create_dataset(name, schema, src_uri=None, labels=None, timeout=TIMEOUT):
start_time = time.time()
try:
if src_uri.startswith("gs://"):
metadata = {"input_config": {"gcs_source": {"uri": [src_uri]}}}
elif src_uri.startswith("bq://"):
metadata = {"input_config": {"bigquery_source": {"uri": [src_uri]}}}
dataset = aip.Dataset(
display_name=name,
metadata_schema_uri=schema,
labels=labels,
metadata=json_format.ParseDict(metadata, Value()),
)
operation = clients["dataset"].create_dataset(parent=PARENT, dataset=dataset)
print("Long running operation:", operation.operation.name)
result = operation.result(timeout=timeout)
print("time:", time.time() - start_time)
print("response")
print(" name:", result.name)
print(" display_name:", result.display_name)
print(" metadata_schema_uri:", result.metadata_schema_uri)
print(" metadata:", dict(result.metadata))
print(" create_time:", result.create_time)
print(" update_time:", result.update_time)
print(" etag:", result.etag)
print(" labels:", dict(result.labels))
return result
except Exception as e:
print("exception:", e)
return None
result = create_dataset("iris-" + TIMESTAMP, DATA_SCHEMA, src_uri=IMPORT_FILE)
# The full unique ID for the dataset
dataset_id = result.name
# The short numeric ID for the dataset
dataset_short_id = dataset_id.split("/")[-1]
print(dataset_id)
def create_pipeline(pipeline_name, model_name, dataset, schema, task):
dataset_id = dataset.split("/")[-1]
input_config = {
"dataset_id": dataset_id,
"fraction_split": {
"training_fraction": 0.8,
"validation_fraction": 0.1,
"test_fraction": 0.1,
},
}
training_pipeline = {
"display_name": pipeline_name,
"training_task_definition": schema,
"training_task_inputs": task,
"input_data_config": input_config,
"model_to_upload": {"display_name": model_name},
}
try:
pipeline = clients["pipeline"].create_training_pipeline(
parent=PARENT, training_pipeline=training_pipeline
)
print(pipeline)
except Exception as e:
print("exception:", e)
return None
return pipeline
TRANSFORMATIONS = [
{"auto": {"column_name": "sepal_width"}},
{"auto": {"column_name": "sepal_length"}},
{"auto": {"column_name": "petal_length"}},
{"auto": {"column_name": "petal_width"}},
]
PIPE_NAME = "iris_pipe-" + TIMESTAMP
MODEL_NAME = "iris_model-" + TIMESTAMP
task = Value(
struct_value=Struct(
fields={
"target_column": Value(string_value=label_column),
"prediction_type": Value(string_value="classification"),
"train_budget_milli_node_hours": Value(number_value=1000),
"disable_early_stopping": Value(bool_value=False),
"transformations": json_format.ParseDict(TRANSFORMATIONS, Value()),
}
)
)
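# To inspect the assembled training-task payload before submitting it, the
# protobuf Value can be rendered as JSON with the MessageToJson helper
# imported earlier (a quick sketch):
print(MessageToJson(task))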
response = create_pipeline(PIPE_NAME, MODEL_NAME, dataset_id, TRAINING_SCHEMA, task)
# The full unique ID for the pipeline
pipeline_id = response.name
# The short numeric ID for the pipeline
pipeline_short_id = pipeline_id.split("/")[-1]
print(pipeline_id)
def get_training_pipeline(name, silent=False):
response = clients["pipeline"].get_training_pipeline(name=name)
if silent:
return response
print("pipeline")
print(" name:", response.name)
print(" display_name:", response.display_name)
print(" state:", response.state)
print(" training_task_definition:", response.training_task_definition)
print(" training_task_inputs:", dict(response.training_task_inputs))
print(" create_time:", response.create_time)
print(" start_time:", response.start_time)
print(" end_time:", response.end_time)
print(" update_time:", response.update_time)
print(" labels:", dict(response.labels))
return response
response = get_training_pipeline(pipeline_id)
while True:
response = get_training_pipeline(pipeline_id, True)
if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED:
print("Training job has not completed:", response.state)
model_to_deploy_id = None
if response.state == aip.PipelineState.PIPELINE_STATE_FAILED:
raise Exception("Training Job Failed")
else:
model_to_deploy = response.model_to_upload
model_to_deploy_id = model_to_deploy.name
print("Training Time:", response.end_time - response.start_time)
break
time.sleep(60)
print("model to deploy:", model_to_deploy_id)
def list_model_evaluations(name):
response = clients["model"].list_model_evaluations(parent=name)
for evaluation in response:
print("model_evaluation")
print(" name:", evaluation.name)
print(" metrics_schema_uri:", evaluation.metrics_schema_uri)
metrics = json_format.MessageToDict(evaluation._pb.metrics)
for metric in metrics.keys():
print(metric)
print("logloss", metrics["logLoss"])
print("auPrc", metrics["auPrc"])
return evaluation.name
last_evaluation = list_model_evaluations(model_to_deploy_id)
ENDPOINT_NAME = "iris_endpoint-" + TIMESTAMP
def create_endpoint(display_name):
endpoint = {"display_name": display_name}
response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint)
print("Long running operation:", response.operation.name)
result = response.result(timeout=300)
print("result")
print(" name:", result.name)
print(" display_name:", result.display_name)
print(" description:", result.description)
print(" labels:", result.labels)
print(" create_time:", result.create_time)
print(" update_time:", result.update_time)
return result
result = create_endpoint(ENDPOINT_NAME)
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
MIN_NODES = 1
MAX_NODES = 1
DEPLOYED_NAME = "iris_deployed-" + TIMESTAMP
def deploy_model(
model, deployed_model_display_name, endpoint, traffic_split={"0": 100}
):
if DEPLOY_GPU:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_type": DEPLOY_GPU,
"accelerator_count": DEPLOY_NGPU,
}
else:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_count": 0,
}
deployed_model = {
"model": model,
"display_name": deployed_model_display_name,
"dedicated_resources": {
"min_replica_count": MIN_NODES,
"max_replica_count": MAX_NODES,
"machine_spec": machine_spec,
},
"enable_container_logging": False,
}
response = clients["endpoint"].deploy_model(
endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split
)
print("Long running operation:", response.operation.name)
result = response.result()
print("result")
deployed_model = result.deployed_model
print(" deployed_model")
print(" id:", deployed_model.id)
print(" model:", deployed_model.model)
print(" display_name:", deployed_model.display_name)
print(" create_time:", deployed_model.create_time)
return deployed_model.id
deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)
INSTANCE = {
"petal_length": "1.4",
"petal_width": "1.3",
"sepal_length": "5.1",
"sepal_width": "2.8",
}
def explain_item(
data_items, endpoint, parameters_dict, deployed_model_id, silent=False
):
parameters = json_format.ParseDict(parameters_dict, Value())
# The format of each instance should conform to the deployed model's prediction input schema.
instances = [json_format.ParseDict(s, Value()) for s in data_items]
response = clients["prediction"].explain(
endpoint=endpoint,
instances=instances,
parameters=parameters,
deployed_model_id=deployed_model_id,
)
if silent:
return response
print("response")
print(" deployed_model_id:", response.deployed_model_id)
try:
predictions = response.predictions
print("predictions")
for prediction in predictions:
print(" prediction:", dict(prediction))
except:
pass
explanations = response.explanations
print("explanations")
for explanation in explanations:
print(explanation)
return response
response = explain_item([INSTANCE], endpoint_id, None, None)
import numpy as np
try:
predictions = response.predictions
label = np.argmax(predictions[0]["scores"])
cls = predictions[0]["classes"][label]
print("Predicted Value:", cls, predictions[0]["scores"][label])
except:
pass
from tabulate import tabulate
feature_names = ["petal_length", "petal_width", "sepal_length", "sepal_width"]
attributions = response.explanations[0].attributions[0].feature_attributions
rows = []
for i, val in enumerate(feature_names):
rows.append([val, INSTANCE[val], attributions[val]])
print(tabulate(rows, headers=["Feature name", "Feature value", "Attribution value"]))
import random
# Prepare 10 test examples to your model for prediction using a random distribution to generate
# test instances
instances = []
for i in range(10):
pl = str(random.uniform(1.0, 2.0))
pw = str(random.uniform(1.0, 2.0))
sl = str(random.uniform(4.0, 6.0))
sw = str(random.uniform(2.0, 4.0))
instances.append(
{"petal_length": pl, "petal_width": pw, "sepal_length": sl, "sepal_width": sw}
)
response = explain_item(instances, endpoint_id, None, None, silent=True)
def sanity_check_explanations(
explanation, prediction, mean_tgt_value=None, variance_tgt_value=None
):
passed_test = 0
total_test = 1
# `attributions` is a dict where keys are the feature names
# and values are the feature attributions for each feature
baseline_score = explanation.attributions[0].baseline_output_value
print("baseline:", baseline_score)
# Sanity check 1
# The prediction at the input is equal to that at the baseline.
# Please use a different baseline. Some suggestions are: random input, training
# set mean.
if abs(prediction - baseline_score) <= 0.05:
print("Warning: example score and baseline score are too close.")
print("You might not get attributions.")
else:
passed_test += 1
print("Sanity Check 1: Passed")
print(passed_test, " out of ", total_test, " sanity checks passed.")
i = 0
for explanation in response.explanations:
try:
prediction = np.max(response.predictions[i]["scores"])
except TypeError:
prediction = np.max(response.predictions[i])
sanity_check_explanations(explanation, prediction)
i += 1
def undeploy_model(deployed_model_id, endpoint):
response = clients["endpoint"].undeploy_model(
endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={}
)
print(response)
undeploy_model(deployed_model_id, endpoint_id)
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of the google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Set up variables
Step8: Vertex constants
Step9: AutoML constants
Step10: Hardware Accelerators
Step11: Container (Docker) image
Step12: Tutorial
Step13: Dataset
Step14: Quick peek at your data
Step15: Dataset
Step16: Now save the unique dataset identifier for the Dataset resource instance you created.
Step17: Train the model
Step18: Construct the task requirements
Step19: Now save the unique identifier of the training pipeline you created.
Step20: Get information on a training pipeline
Step21: Deployment
Step22: Model information
Step23: Deploy the Model resource
Step24: Now get the unique identifier for the Endpoint resource you created.
Step25: Compute instance scaling
Step26: Deploy Model resource to the Endpoint resource
Step27: Make a online prediction request with explainability
Step28: Make a prediction with explanation
Step29: Understanding the explanations response
Step30: Examine feature attributions
Step31: Check your explanations and baselines
Step32: Sanity check
Step33: Undeploy the Model resource
Step34: Cleaning up
|
4,312
|
<ASSISTANT_TASK:>
Python Code:
from bigbang.archive import Archive
urls = [#"analytics",
"conferences",
"design",
"education",
"gendergap",
"historic",
"hot",
"ietf-privacy",
"ipython-dev",
"ipython-user",
"languages",
"maps-l",
"numpy-discussion",
"playground",
"potlatch-dev",
"python-committers",
"python-dev",
"scipy-dev",
"scipy-user",
"social-media",
"spambayes",
#"wikien-l",
"wikimedia-l"]
archives= [(url,Archive(url,archive_dir="../archives")) for url in urls]
archives = dict(archives)
import bigbang.graph as graph
igs = dict([(k,graph.messages_to_interaction_graph(v.data)) for (k,v) in list(archives.items())])
igs
import networkx as nx
def draw_interaction_graph(ig):
pos = nx.graphviz_layout(ig, prog='neato')  # in networkx >= 2 this lives at nx.nx_agraph.graphviz_layout
node_size = [data['sent'] * 4 for name,data in ig.nodes(data=True)]
nx.draw(ig,
pos,
node_size = node_size,
node_color = 'b',
alpha = 0.4,
font_size=18,
font_weight='bold'
)
# edge width is proportional to replies sent
edgewidth=[d['weight'] for (u,v,d) in ig.edges(data=True)]
#overlay edges with width based on weight
nx.draw_networkx_edges(ig,pos,alpha=0.5,width=edgewidth,edge_color='r')
%matplotlib inline
import matplotlib.pyplot as plt
plt.figure(550,figsize=(12.5, 7.5))
for i, (ln, ig) in enumerate(igs.items(), start=1):
print(ln)
try:
plt.subplot(5, 5, i)
#print nx.degree_assortativity_coefficient(ig)
draw_interaction_graph(ig)
except:
print('plotting failure')
plt.show()
for ln,ig in list(igs.items()):
print(ln, len(ig.nodes()), nx.degree_assortativity_coefficient(ig,weight='weight'))
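# A compact per-list summary (a sketch using standard networkx accessors):
for ln, ig in igs.items(): print(ln, ig.number_of_edges(), round(nx.density(ig), 4))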
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The above code reads in preprocessed email archive data. These mailing lists are from a variety of different sources
Step2: Now we have processed the mailing lists into interaction graphs based on replies. This is what those graphs look like
Step3: Well, that didn't work out so well...
|
4,313
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
def gram_linear(x):
"""Compute Gram (kernel) matrix for a linear kernel.
Args:
x: A num_examples x num_features matrix of features.
Returns:
A num_examples x num_examples Gram matrix of examples.
"""
return x.dot(x.T)
def gram_rbf(x, threshold=1.0):
"""Compute Gram (kernel) matrix for an RBF kernel.
Args:
x: A num_examples x num_features matrix of features.
threshold: Fraction of median Euclidean distance to use as RBF kernel
bandwidth. (This is the heuristic we use in the paper. There are other
possible ways to set the bandwidth; we didn't try them.)
Returns:
A num_examples x num_examples Gram matrix of examples.
"""
dot_products = x.dot(x.T)
sq_norms = np.diag(dot_products)
sq_distances = -2 * dot_products + sq_norms[:, None] + sq_norms[None, :]
sq_median_distance = np.median(sq_distances)
return np.exp(-sq_distances / (2 * threshold ** 2 * sq_median_distance))
def center_gram(gram, unbiased=False):
"""Center a symmetric Gram matrix.
This is equivalent to centering the (possibly infinite-dimensional) features
induced by the kernel before computing the Gram matrix.
Args:
gram: A num_examples x num_examples symmetric matrix.
unbiased: Whether to adjust the Gram matrix in order to compute an unbiased
estimate of HSIC. Note that this estimator may be negative.
Returns:
A symmetric matrix with centered columns and rows.
"""
if not np.allclose(gram, gram.T):
raise ValueError('Input must be a symmetric matrix.')
gram = gram.copy()
if unbiased:
# This formulation of the U-statistic, from Szekely, G. J., & Rizzo, M.
# L. (2014). Partial distance correlation with methods for dissimilarities.
# The Annals of Statistics, 42(6), 2382-2412, seems to be more numerically
# stable than the alternative from Song et al. (2007).
n = gram.shape[0]
np.fill_diagonal(gram, 0)
means = np.sum(gram, 0, dtype=np.float64) / (n - 2)
means -= np.sum(means) / (2 * (n - 1))
gram -= means[:, None]
gram -= means[None, :]
np.fill_diagonal(gram, 0)
else:
means = np.mean(gram, 0, dtype=np.float64)
means -= np.mean(means) / 2
gram -= means[:, None]
gram -= means[None, :]
return gram
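# Quick numeric sanity check (a sketch): biased centering of a linear Gram
# matrix equals the Gram matrix of column-centered features, i.e. HKH = (HX)(HX)^T.
X_check = np.random.randn(6, 3)
Xc = X_check - X_check.mean(0, keepdims=True)
assert np.allclose(center_gram(gram_linear(X_check)), Xc.dot(Xc.T))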
def cka(gram_x, gram_y, debiased=False):
"""Compute CKA.
Args:
gram_x: A num_examples x num_examples Gram matrix.
gram_y: A num_examples x num_examples Gram matrix.
debiased: Use unbiased estimator of HSIC. CKA may still be biased.
Returns:
The value of CKA between X and Y.
"""
gram_x = center_gram(gram_x, unbiased=debiased)
gram_y = center_gram(gram_y, unbiased=debiased)
# Note: To obtain HSIC, this should be divided by (n-1)**2 (biased variant) or
# n*(n-3) (unbiased variant), but this cancels for CKA.
scaled_hsic = gram_x.ravel().dot(gram_y.ravel())
normalization_x = np.linalg.norm(gram_x)
normalization_y = np.linalg.norm(gram_y)
return scaled_hsic / (normalization_x * normalization_y)
def _debiased_dot_product_similarity_helper(
xty, sum_squared_rows_x, sum_squared_rows_y, squared_norm_x, squared_norm_y,
n):
"""Helper for computing debiased dot product similarity (i.e. linear HSIC)."""
# This formula can be derived by manipulating the unbiased estimator from
# Song et al. (2007).
return (
xty - n / (n - 2.) * sum_squared_rows_x.dot(sum_squared_rows_y)
+ squared_norm_x * squared_norm_y / ((n - 1) * (n - 2)))
def feature_space_linear_cka(features_x, features_y, debiased=False):
"""Compute CKA with a linear kernel, in feature space.
This is typically faster than computing the Gram matrix when there are fewer
features than examples.
Args:
features_x: A num_examples x num_features matrix of features.
features_y: A num_examples x num_features matrix of features.
debiased: Use unbiased estimator of dot product similarity. CKA may still be
biased. Note that this estimator may be negative.
Returns:
The value of CKA between X and Y.
"""
features_x = features_x - np.mean(features_x, 0, keepdims=True)
features_y = features_y - np.mean(features_y, 0, keepdims=True)
dot_product_similarity = np.linalg.norm(features_x.T.dot(features_y)) ** 2
normalization_x = np.linalg.norm(features_x.T.dot(features_x))
normalization_y = np.linalg.norm(features_y.T.dot(features_y))
if debiased:
n = features_x.shape[0]
# Equivalent to np.sum(features_x ** 2, 1) but avoids an intermediate array.
sum_squared_rows_x = np.einsum('ij,ij->i', features_x, features_x)
sum_squared_rows_y = np.einsum('ij,ij->i', features_y, features_y)
squared_norm_x = np.sum(sum_squared_rows_x)
squared_norm_y = np.sum(sum_squared_rows_y)
dot_product_similarity = _debiased_dot_product_similarity_helper(
dot_product_similarity, sum_squared_rows_x, sum_squared_rows_y,
squared_norm_x, squared_norm_y, n)
normalization_x = np.sqrt(_debiased_dot_product_similarity_helper(
normalization_x ** 2, sum_squared_rows_x, sum_squared_rows_x,
squared_norm_x, squared_norm_x, n))
normalization_y = np.sqrt(_debiased_dot_product_similarity_helper(
normalization_y ** 2, sum_squared_rows_y, sum_squared_rows_y,
squared_norm_y, squared_norm_y, n))
return dot_product_similarity / (normalization_x * normalization_y)
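# In formula form, the biased variant above computes
#   CKA(X, Y) = ||Y^T X||_F^2 / (||X^T X||_F * ||Y^T Y||_F)
# for column-centered X and Y, i.e. HSIC(K, L) normalized by
# sqrt(HSIC(K, K) * HSIC(L, L)) with linear kernels K = XX^T and L = YY^T.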
np.random.seed(1337)
X = np.random.randn(100, 10)
Y = np.random.randn(100, 10) + X
cka_from_examples = cka(gram_linear(X), gram_linear(Y))
cka_from_features = feature_space_linear_cka(X, Y)
print('Linear CKA from Examples: {:.5f}'.format(cka_from_examples))
print('Linear CKA from Features: {:.5f}'.format(cka_from_features))
np.testing.assert_almost_equal(cka_from_examples, cka_from_features)
rbf_cka = cka(gram_rbf(X, 0.5), gram_rbf(Y, 0.5))
print('RBF CKA: {:.5f}'.format(rbf_cka))
cka_from_examples_debiased = cka(gram_linear(X), gram_linear(Y), debiased=True)
cka_from_features_debiased = feature_space_linear_cka(X, Y, debiased=True)
print('Linear CKA from Examples (Debiased): {:.5f}'.format(
cka_from_examples_debiased))
print('Linear CKA from Features (Debiased): {:.5f}'.format(
cka_from_features_debiased))
np.testing.assert_almost_equal(cka_from_examples_debiased,
cka_from_features_debiased)
def cca(features_x, features_y):
"""Compute the mean squared CCA correlation (R^2_{CCA}).
Args:
features_x: A num_examples x num_features matrix of features.
features_y: A num_examples x num_features matrix of features.
Returns:
The mean squared CCA correlations between X and Y.
"""
qx, _ = np.linalg.qr(features_x) # Or use SVD with full_matrices=False.
qy, _ = np.linalg.qr(features_y)
return np.linalg.norm(qx.T.dot(qy)) ** 2 / min(
features_x.shape[1], features_y.shape[1])
print('Mean Squared CCA Correlation: {:.5f}'.format(cca(X, Y)))
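# Sanity check (a sketch): CCA of a full-column-rank matrix with itself is 1.
assert np.isclose(cca(X, X), 1.0)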
transform = np.random.randn(10, 10)
_, orthogonal_transform = np.linalg.eigh(transform.T.dot(transform))
# CKA is invariant only to orthogonal transformations.
np.testing.assert_almost_equal(
feature_space_linear_cka(X, Y),
feature_space_linear_cka(X.dot(orthogonal_transform), Y))
np.testing.assert_(not np.isclose(
feature_space_linear_cka(X, Y),
feature_space_linear_cka(X.dot(transform), Y)))
# CCA is invariant to any invertible linear transform.
np.testing.assert_almost_equal(cca(X, Y), cca(X.dot(orthogonal_transform), Y))
np.testing.assert_almost_equal(cca(X, Y), cca(X.dot(transform), Y))
# Both CCA and CKA are invariant to isotropic scaling.
np.testing.assert_almost_equal(cca(X, Y), cca(X * 1.337, Y))
np.testing.assert_almost_equal(
feature_space_linear_cka(X, Y),
feature_space_linear_cka(X * 1.337, Y))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step6: Demo code for "Similarity of Neural Network Representations Revisited"
Step7: Tutorial
Step8: Linear CKA can be computed either based on dot products between examples or dot products between features
Step9: It is also possible to compute CKA with nonlinear kernels. Here, we use an RBF kernel with the bandwidth set to $\frac{1}{2}$ the median distance in the distance matrix.
Step10: If the number of examples is small, it might help to compute a "debiased" form of CKA. This form of CKA can be obtained by recognizing that HSIC can be formulated as a U-statistic, as in Song et al., 2007, and replacing biased estimators of HSIC in the numerator and denominator with this unbiased estimator. With some further algebraic manipulation, we also derived an unbiased estimator based on similarities between features rather than the Gram matrices. The resulting estimator of CKA is still generally biased, but the bias is reduced.
Step12: CKA vs. CCA
Step13: Invariance Properties
|
4,314
|
<ASSISTANT_TASK:>
Python Code:
a=45
print(a)
b=25
print(b)
x="Hello Everybody!"
print(x)
Amiright=True
print(Amiright)
fl = 0.54
print(fl)
print(a)
print(b)
c=a+b
print(c)
c=a/b
print(c)
c=c+a
print(c)
#addition
print(2+2)
#division or quotient
print(42/42)
#multiplication
print(2*3)
#subtraction
print(2-8)
#exponent or power
print(2**3)
#modulo or remainder
print(10%7)
#additive equality
a=56
a+=10
print(a)
#greater than
print(10>5)
#less than
print(10<5)
#is equal to
print(11==10)
#is not equal to
print(10!=10)
#and
print(11==11 and 12>10)
#or
print(10==11 or 5<7)
string="hiya"
print(len(string))
print(string[0])
string1=" there!"
print(string+string1)
# Note: strings do not support subtraction, so this line raises a TypeError
print(string-string1)
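# A few more common string operations that do work (illustrative sketch):
print(string * 2)      # repetition: 'hiyahiya'
print(string.upper())  # 'HIYA'
print('iy' in string)  # substring membership: True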
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Okay, neat. Can I only make variables that hold numbers?
Step2: Wait a minute, the numerical variables above are only integers...can we use decimal points?
Step3: Wow look at all those variables! What do I call them all?
Step4: Note that our a and b didn't go away. Variables will stick around until you change them.
Step5: We have the usual arithmetic operators $+$, $-$, $*$, $/$. On top of those we also have the power operator $**$, the modulo operator $\%$, which returns the remainder of a division, and the in-place addition operator $+=$ (plain $=$ is assignment). We also have the comparison and logical operators $>$, $<$, $==$, $!=$, and, or, which mean greater than, less than, is equal to, is not equal to, logical and, and logical or.
Step6: Exercises
|
4,315
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'sandbox-3', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
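# Illustrative example only (hypothetical choices, not a description of any
# real model): once an id is set, the property is recorded with one or more
# DOC.set_value(...) calls drawn from the valid choices listed above, e.g.
# DOC.set_value("Primitive equations")
# DOC.set_value("Boussinesq")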
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
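# Illustrative sketch only (the value below is a hypothetical choice, not a
# recorded model answer): once an id is set, the companion call supplies one of
# the listed valid choices, e.g.
#     DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
#     DOC.set_value("Freshwater flux")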
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
4,316
|
<ASSISTANT_TASK:>
Python Code:
import h2o
from h2o.estimators.uplift_random_forest import H2OUpliftRandomForestEstimator
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.style as style
import pandas as pd
h2o.init(strict_version_check=False) # max_mem_size=10
control_name = "control"
treatment_column = "treatment"
response_column = "visit"
feature_cols = ["f"+str(x) for x in range(0,12)]
df = pd.read_csv("http://mr-0xd4:50070/webhdfs/v1/datasets/criteo/v2.1/criteo-research-uplift-v2.1.csv?op=OPEN")
df.head()
print('Total number of samples: {}'.format(len(df)))
print('The dataset is largely imbalanced: ')
print(df['treatment'].value_counts(normalize = True))
print('Percentage of users that visit: {}%'.format(100*round(df['visit'].mean(),4)))
print('Percentage of users that convert: {}%'.format(100*round(df['conversion'].mean(),4)))
print('Percentage of visitors that convert: {}%'.format(100*round(df[df["visit"]==1]["conversion"].mean(),4)))
# Print proportion of a binary column
# https://www.kaggle.com/code/hughhuyton/criteo-uplift-modelling/notebook
def print_proportion(df, column):
fig = plt.figure(figsize = (10,6))
target_count = df[column].value_counts()
print('Class 0:', target_count[0])
print('Class 1:', target_count[1])
print('Proportion:', int(round(target_count[1] / target_count[0])), ': 1')
target_count.plot(kind='bar', title='Treatment Class Distribution', color=['#2077B4', '#FF7F0E'], fontsize = 15)
plt.xticks(rotation=0)
print_proportion(df, treatment_column)
from sklearn.model_selection import train_test_split
train_df, test_df = train_test_split(df, test_size=0.2, random_state=42, stratify=df['treatment'])
print(train_df.shape)
print(test_df.shape)
del(df)
print_proportion(train_df, treatment_column)
# Random Undersampling (finding the majority class and undersampling it)
# https://www.kaggle.com/code/hughhuyton/criteo-uplift-modelling/notebook
def random_under(df, feature):
target = df[feature].value_counts()
if target.values[0]<target.values[1]:
under = target.index.values[1]
else:
under = target.index.values[0]
df_0 = df[df[feature] != under]
df_1 = df[df[feature] == under]
df_treatment_under = df_1.sample(len(df_0))
df_1 = pd.concat([df_treatment_under, df_0], axis=0)
return df_1
train_df = random_under(train_df, treatment_column)
print_proportion(train_df, treatment_column)
# Method to transform the data for the LGWUM approach; explained later.
def target_class_lgwum(df, treatment, target, column_name):
#CN:
df[column_name] = 0
#CR:
df.loc[(df[treatment] == 0) & (df[target] != 0), column_name] = 1
#TN:
df.loc[(df[treatment] != 0) & (df[target] == 0), column_name] = 2
#TR:
df.loc[(df[treatment] != 0) & (df[target] != 0), column_name] = 3
return df
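# Minimal sanity check of the encoding above on a toy frame (values are
# illustrative): control/no-visit -> 0 (CN), control/visit -> 1 (CR),
# treatment/no-visit -> 2 (TN), treatment/visit -> 3 (TR).
toy = pd.DataFrame({"treatment": [0, 0, 1, 1], "visit": [0, 1, 0, 1]})
print(target_class_lgwum(toy, "treatment", "visit", "lgwum_check"))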
response_column_lgwum = "lgwum_response"
train_df = target_class_lgwum(train_df, treatment_column, response_column, response_column_lgwum)
test_df = target_class_lgwum(test_df, treatment_column, response_column, response_column_lgwum)
h2o_train_df = h2o.H2OFrame(train_df)
del(train_df)
h2o_train_df[treatment_column] = h2o_train_df[treatment_column].asfactor()
h2o_train_df[response_column] = h2o_train_df[response_column].asfactor()
h2o_train_df[response_column_lgwum] = h2o_train_df[response_column_lgwum].asfactor()
h2o_test_df = h2o.H2OFrame(test_df)
h2o_test_df[treatment_column] = h2o_test_df[treatment_column].asfactor()
h2o_test_df[response_column] = h2o_test_df[response_column].asfactor()
h2o_test_df[response_column_lgwum] = h2o_test_df[response_column_lgwum].asfactor()
del(test_df)
ntree = 20
max_depth = 15
metric="Euclidean"
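# uplift_metric selects the divergence used to score candidate splits between the
# treatment and control response distributions; the H2O docs list "KL" and
# "ChiSquared" as the other supported options besides "Euclidean".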
h2o_uplift_model = H2OUpliftRandomForestEstimator(
ntrees=ntree,
max_depth=max_depth,
min_rows=30,
nbins=1000,
sample_rate=0.80,
score_each_iteration=True,
treatment_column=treatment_column,
uplift_metric=metric,
auuc_nbins=1000,
auuc_type="gain",
seed=42)
h2o_uplift_model.train(y=response_column, x=feature_cols, training_frame=h2o_train_df)
h2o_uplift_model
# Plot uplift score
# source https://www.kaggle.com/code/hughhuyton/criteo-uplift-modelling/notebook
def plot_uplift_score(uplift_score):
plt.figure(figsize = (10,6))
plt.xlim(-.05, .1)
plt.hist(uplift_score, bins=1000, color=['#2077B4'])
plt.xlabel('Uplift score')
plt.ylabel('Number of observations in validation set')
h2o_uplift_pred = h2o_uplift_model.predict(h2o_test_df)
h2o_uplift_pred
plot_uplift_score(h2o_uplift_pred['uplift_predict'].as_data_frame().uplift_predict)
perf_h2o = h2o_uplift_model.model_performance(h2o_test_df)
perf_h2o.auuc_table()
perf_h2o.plot_uplift(metric="qini")
perf_h2o.plot_uplift(metric="gain")
perf_h2o.plot_uplift(metric="lift")
perf_h2o.aecu_table()
perf_h2o.plot_uplift(metric="gain", normalize=True)
perf_h2o.auuc_normalized()
h2o_uplift_model.scoring_history()
from h2o.estimators.gbm import H2OGradientBoostingEstimator
h2o_gbm_lgwum = H2OGradientBoostingEstimator(ntrees=ntree,
max_depth=max_depth,
min_rows=30,
nbins=1000,
score_each_iteration=False,
seed=42)
h2o_gbm_lgwum.train(y=response_column_lgwum, x=feature_cols, training_frame=h2o_train_df)
h2o_gbm_lgwum
uplift_predict_lgwum = h2o_gbm_lgwum.predict(h2o_test_df)
result = uplift_predict_lgwum.as_data_frame()
result.columns = ['predict', 'p_cn', 'p_cr', 'p_tn', 'p_tr']
result['uplift_score'] = result.eval('\
p_cn/(p_cn + p_cr) \
+ p_tr/(p_tn + p_tr) \
- p_tn/(p_tn + p_tr) \
- p_cr/(p_cn + p_cr)')
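# Grouping the control pair (p_cn + p_cr) and the treatment pair (p_tn + p_tr),
# the expression above simplifies to
# (p_tr - p_tn)/(p_tr + p_tn) + (p_cn - p_cr)/(p_cn + p_cr),
# i.e. the generalized weighted uplift score.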
result
plot_uplift_score(result.uplift_score)
lgwum_predict = h2o.H2OFrame(result['uplift_score'].tolist())
perf_lgwum = h2o.make_metrics(lgwum_predict, h2o_test_df[response_column], treatment=h2o_test_df[treatment_column], auuc_type="gain", auuc_nbins=81)
perf_lgwum
perf_h2o.plot_uplift(metric="qini")
perf_lgwum.plot_uplift(metric="qini")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Prepare data
Step3: Import data to H2O
Step4: Train H2O UpliftDRF model
Step5: Predict and plot Uplift Score
Step6: Evaluate the model
Step7: Area Under Uplift Curve (AUUC) calculation
Step8: Cumulative Uplift curve plot
Step9: Qini value and Average Excess Cumulative Uplift (AECU)
Step10: Normalized AUUC
Step11: Scoring history and importance of number of trees
Step12: Comparison of the tree-based approach and Generalized Weighted Uplift (LGWUM)
|
4,317
|
<ASSISTANT_TASK:>
Python Code:
import os
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
import pprint
assert 'COLAB_TPU_ADDR' in os.environ, 'Did you forget to switch to TPU?'
tpu_address = 'grpc://' + os.environ['COLAB_TPU_ADDR']
with tf.Session(tpu_address) as sess:
devices = sess.list_devices()
pprint.pprint(devices)
device_is_tpu = [True if 'TPU' in str(x) else False for x in devices]
assert True in device_is_tpu, 'Did you forget to switch to TPU?'
import json
import os
import pprint
import re
import time
import tensorflow.compat.v1 as tf
import tensorflow_gcs_config
# Google Cloud Storage bucket for Estimator logs and storing
# the training dataset.
bucket = '' #@param {type:"string"}
assert bucket, 'Must specify an existing GCS bucket name'
print('Using bucket: {}'.format(bucket))
model_dir = 'gs://{}/{}'.format(
bucket, time.strftime('tpuestimator-tfgan/%Y-%m-%d-%H-%M-%S'))
print('Using model dir: {}'.format(model_dir))
assert 'COLAB_TPU_ADDR' in os.environ, 'Missing TPU; did you request a TPU in Notebook Settings?'
tpu_address = 'grpc://{}'.format(os.environ['COLAB_TPU_ADDR'])
from google.colab import auth
auth.authenticate_user()
# Upload credentials to TPU.
tf.config.experimental_connect_to_host(tpu_address)
tensorflow_gcs_config.configure_gcs_from_colab_auth()
# Now credentials are set for all future sessions on this TPU.
# Check that imports for the rest of the file work.
import os
import tensorflow.compat.v1 as tf
!pip install tensorflow-gan
import tensorflow_gan as tfgan
import tensorflow_datasets as tfds
import tensorflow_hub as hub
import numpy as np
import matplotlib.pyplot as plt
# Allow matplotlib images to render immediately.
%matplotlib inline
# Disable noisy outputs.
tf.logging.set_verbosity(tf.logging.ERROR)
tf.autograph.set_verbosity(0, False)
import warnings
warnings.filterwarnings("ignore")
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
dataset_dir = 'gs://{}/{}'.format(bucket, 'datasets')
def input_fn(mode, params):
assert 'batch_size' in params
assert 'noise_dims' in params
bs = params['batch_size']
nd = params['noise_dims']
split = 'train' if mode == tf.estimator.ModeKeys.TRAIN else 'test'
shuffle = (mode == tf.estimator.ModeKeys.TRAIN)
just_noise = (mode == tf.estimator.ModeKeys.PREDICT)
noise_ds = (tf.data.Dataset.from_tensors(0)
.map(lambda _: tf.random.normal([bs, nd]))
# If 'predict', just generate one batch.
.repeat(1 if just_noise else None))
if just_noise:
return noise_ds
def _preprocess(element):
# Map [0, 255] to [-1, 1].
images = (tf.cast(element['image'], tf.float32) - 127.5) / 127.5
return images
images_ds = (tfds.load('cifar10:3.*.*', split=split, data_dir=dataset_dir)
.map(_preprocess, num_parallel_calls=4)
.cache()
.repeat())
if shuffle:
images_ds = images_ds.shuffle(
buffer_size=10000, reshuffle_each_iteration=True)
images_ds = (images_ds.batch(bs, drop_remainder=True)
.prefetch(tf.data.experimental.AUTOTUNE))
return tf.data.Dataset.zip((noise_ds, images_ds))
def noise_input_fn(params):
np.random.seed(0)
np_noise = np.random.randn(params['batch_size'], params['noise_dims'])
return tf.data.Dataset.from_tensors(tf.constant(np_noise, dtype=tf.float32))
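# The fixed NumPy seed above means noise_input_fn returns the same latent vectors
# on every call, so the generated image grids stay comparable across checkpoints.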
params = {'batch_size': 1, 'noise_dims':1}
input_fn(tf.estimator.ModeKeys.EVAL, params)
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
import tensorflow_gan as tfgan
params = {'batch_size': 80, 'noise_dims':64}
ds = input_fn(tf.estimator.ModeKeys.EVAL, params)
numpy_imgs = next(iter(tfds.as_numpy(ds)))[1]
image_grid = tfgan.eval.python_image_grid(numpy_imgs, grid_shape=(8, 10))
def _show_image_grid(image_grid):
plt.axis('off')
plt.imshow((image_grid + 1.0) / 2.0, # [-1, 1] -> [0, 1]
aspect='auto')
plt.show()
_show_image_grid(image_grid)
def _leaky_relu(x):
return tf.nn.leaky_relu(x, alpha=0.2)
def _batch_norm(x, is_training, name):
return tf.layers.batch_normalization(
x, momentum=0.9, epsilon=1e-5, training=is_training, name=name)
def _dense(x, channels, name):
return tf.layers.dense(
x, channels,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name=name)
def _conv2d(x, filters, kernel_size, stride, name):
return tf.layers.conv2d(
x, filters, [kernel_size, kernel_size],
strides=[stride, stride], padding='same',
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name=name)
def _deconv2d(x, filters, kernel_size, stride, name):
return tf.layers.conv2d_transpose(
x, filters, [kernel_size, kernel_size],
strides=[stride, stride], padding='same',
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name=name)
def discriminator(images, unused_conditioning, is_training=True,
scope='Discriminator'):
  """Discriminator for CIFAR images.

  Args:
    images: A Tensor of shape [batch size, width, height, channels], that can be
      either real or generated. It is the discriminator's goal to distinguish
      between the two.
    unused_conditioning: The TFGAN API can help with conditional GANs, which
      would require extra `condition` information to both the generator and the
      discriminator. Since this example is not conditional, we do not use this
      argument.
    is_training: If `True`, batch norm uses batch statistics. If `False`, batch
      norm uses the exponential moving average collected from population
      statistics.
    scope: A variable scope or string for the discriminator.

  Returns:
    A 1D Tensor of shape [batch size] representing the confidence that the
    images are real. The output can lie in [-inf, inf], with positive values
    indicating high confidence that the images are real.
  """
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
x = _conv2d(images, 64, 5, 2, name='d_conv1')
x = _leaky_relu(x)
x = _conv2d(x, 128, 5, 2, name='d_conv2')
x = _leaky_relu(_batch_norm(x, is_training, name='d_bn2'))
x = _conv2d(x, 256, 5, 2, name='d_conv3')
x = _leaky_relu(_batch_norm(x, is_training, name='d_bn3'))
x = tf.reshape(x, [-1, 4 * 4 * 256])
x = _dense(x, 1, name='d_fc_4')
return x
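# Shape trace for a batch of size B: images [B, 32, 32, 3] -> stride-2 convs ->
# [B, 16, 16, 64] -> [B, 8, 8, 128] -> [B, 4, 4, 256] -> flatten [B, 4096]
# -> dense -> [B, 1] real/fake logit.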
def generator(noise, is_training=True, scope='Generator'):
  """Generator to produce CIFAR images.

  Args:
    noise: A 2D Tensor of shape [batch size, noise dim]. Since this example
      does not use conditioning, this Tensor represents a noise vector of some
      kind that will be reshaped by the generator into CIFAR examples.
    is_training: If `True`, batch norm uses batch statistics. If `False`, batch
      norm uses the exponential moving average collected from population
      statistics.
    scope: A variable scope or string for the generator.

  Returns:
    A single Tensor with a batch of generated CIFAR images.
  """
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
net = _dense(noise, 4096, name='g_fc1')
net = tf.nn.relu(_batch_norm(net, is_training, name='g_bn1'))
net = tf.reshape(net, [-1, 4, 4, 256])
net = _deconv2d(net, 128, 5, 2, name='g_dconv2')
net = tf.nn.relu(_batch_norm(net, is_training, name='g_bn2'))
net = _deconv2d(net, 64, 4, 2, name='g_dconv3')
net = tf.nn.relu(_batch_norm(net, is_training, name='g_bn3'))
net = _deconv2d(net, 3, 4, 2, name='g_dconv4')
net = tf.tanh(net)
return net
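# Shape trace for a batch of size B: noise [B, noise_dims] -> dense [B, 4096]
# -> reshape [B, 4, 4, 256] -> stride-2 deconvs -> [B, 8, 8, 128]
# -> [B, 16, 16, 64] -> [B, 32, 32, 3], squashed into [-1, 1] by tanh.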
import tensorflow.compat.v1 as tf
import tensorflow_gan as tfgan
import tensorflow_hub as hub
import numpy as np
tf.disable_eager_execution()
eval_batch_size = 4000 #@param
images_per_batch = 2000 #@param
def get_real_image_logits(num_images, classifier_model):
  """Returns an array with logits from real images and a CIFAR classifier.

  We normally want many thousands of examples to run eval. However, we can't fit
  inference for all of them in memory at once. Instead, we use TF-GAN eval utils
  to more efficiently manage memory.

  Args:
    num_images: Total number of images to produce logits for.
    classifier_model: A Python function that takes images and produces logits.

  Returns:
    A numpy array of logits of shape close to [num_images, ?].
  """
ds = input_fn(tf.estimator.ModeKeys.TRAIN,
                {'batch_size': num_images, 'noise_dims': 1})  # one batch holding all num_images real examples, so the shape assert below holds
iterator = tf.data.make_one_shot_iterator(ds)
cifar_imgs = iterator.get_next()[1]
real_logits = classifier_model(cifar_imgs)
with tf.train.MonitoredSession() as sess:
logits = sess.run(real_logits)
assert len(logits.shape) == 2
assert logits.shape[0] == num_images
return logits
def init_global_real_logits():
  """Initialize a global variable with classifier logits for real data."""
# We can hold all the real logits in memory at once, since CIFAR10 isn't that
# big. Be sure to calculate it only once.
global real_logits
try:
real_logits is not None
except NameError:
with tf.Graph().as_default():
classifier_model = hub.Module("https://tfhub.dev/deepmind/ganeval-cifar10-convnet/1")
real_logits = get_real_image_logits(
eval_batch_size, classifier_model)
assert real_logits.shape == (eval_batch_size, 10)
def calculate_real_data_classifier_score():
  """Calculate the classifier score on real data logits."""
assert real_logits is not None
classifier_score = tfgan.eval.classifier_score_from_logits(real_logits)
with tf.train.MonitoredSession() as sess:
cscore_real = sess.run(classifier_score)
return cscore_real
def get_inception_score_and_fid(est):
  """Calculate our evaluation metrics."""
global real_logits
assert real_logits is not None
tf.reset_default_graph()
  # We don't want to hold all the images and activations at once, so use a
# memory-efficient utility.
def sample_fn():
predictions = np.array([x['generated_data'] for x in est.predict(input_fn)])
assert predictions.shape == (images_per_batch, 32, 32, 3)
return predictions
fake_imgs = tf.concat(
[sample_fn() for _ in range(eval_batch_size // images_per_batch)], axis=0)
classifier_fn = hub.Module("https://tfhub.dev/deepmind/ganeval-cifar10-convnet/1")
fake_logits = classifier_fn(fake_imgs)
fake_logits.shape.assert_is_compatible_with([eval_batch_size, 10])
classifier_score = tfgan.eval.classifier_score_from_logits(fake_logits)
fid = tfgan.eval.frechet_classifier_distance_from_activations(
real_logits, fake_logits)
with tf.train.MonitoredSession() as sess:
cscore_np, fid_np = sess.run([classifier_score, fid])
return cscore_np, fid_np
import os
import tensorflow.compat.v1 as tf
import tensorflow_gan as tfgan
tf.disable_eager_execution()
noise_dims = 1024 #@param
generator_lr = 0.0002 #@param
discriminator_lr = 0.0002 #@param
train_batch_size = 1024 #@param
config = tf.estimator.tpu.RunConfig(
model_dir=model_dir,
master=tpu_address,
tpu_config=tf.estimator.tpu.TPUConfig(iterations_per_loop=images_per_batch))
est = tfgan.estimator.TPUGANEstimator(
generator_fn=generator,
discriminator_fn=discriminator,
generator_loss_fn=tfgan.losses.modified_generator_loss,
discriminator_loss_fn=tfgan.losses.modified_discriminator_loss,
generator_optimizer=tf.train.AdamOptimizer(generator_lr, 0.5),
discriminator_optimizer=tf.train.AdamOptimizer(discriminator_lr, 0.5),
joint_train=False, # train G and D sequentially instead of jointly.
train_batch_size=train_batch_size,
predict_batch_size=images_per_batch,
use_tpu=True,
params={'noise_dims': noise_dims},
config=config)
import time
import matplotlib.pyplot as plt
max_steps = 50000 #@param
steps_per_eval = 5000 #@param
cur_step = 0
start_time = time.time()
cscores, fids, steps = [], [], []
init_global_real_logits()
print('Initialized classifier logits for real data.')
classifier_score_real_data = calculate_real_data_classifier_score()
print('Calculated classifier score for real data.')
while cur_step < max_steps:
# Train for a fixed number of steps.
start_step = cur_step
step_to_stop_at = min(cur_step + steps_per_eval, max_steps)
start = time.time()
est.train(input_fn, max_steps=step_to_stop_at)
end = time.time()
cur_step = step_to_stop_at
# Print some performance statistics.
steps_taken = step_to_stop_at - start_step
time_taken = end - start
steps_per_sec = steps_taken / time_taken
min_since_start = (time.time() - start_time) / 60.0
print("Current step: %i, %.4f steps / sec, time since start: %.1f min" % (
cur_step, steps_per_sec, min_since_start))
# Calculate some evaluation metrics.
eval_start_time = time.time()
cscore, fid = get_inception_score_and_fid(est)
eval_time = (time.time() - eval_start_time)
cscores.append(cscore)
fids.append(fid)
steps.append(cur_step)
print("Classifier score: %.2f / %.2f, FID: %.1f, "
"time to calculate eval: %.2f sec" % (
cscore, classifier_score_real_data, fid, eval_time))
# Generate and show some predictions.
predictions = np.array(
[x['generated_data'] for x in est.predict(noise_input_fn)])[:80]
image_grid = tfgan.eval.python_image_grid(predictions, grid_shape=(8, 10))
_show_image_grid(image_grid)
# Plot the metrics vs step.
plt.title('Frechet distance per step')
plt.plot(steps, fids)
plt.figure()
plt.title('Classifier Score per step')
plt.plot(steps, cscores)
plt.plot(steps, [classifier_score_real_data] * len(steps))
plt.figure()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Authentication
Step2: Check imports
Step3: Train and evaluate a GAN model on TPU using TF-GAN.
Step4: Download the data. TensorFlow Datasets will write the data once to your GCS bucket, then reuse it for future calls.
Step5: Sanity check the inputs.
Step8: Neural Net Architecture
Step13: Eval Utilities
Step14: Estimator
Step15: Train and Eval Loop
|
4,318
|
<ASSISTANT_TASK:>
Python Code:
%%bash
sudo pip freeze | grep google-cloud-bigquery==1.6.1 || \
sudo pip install google-cloud-bigquery==1.6.1
from google.cloud import bigquery
import pandas as pd
%%bash
export PROJECT=$(gcloud config list project --format "value(core.project)")
echo "Your current GCP Project Name is: "$PROJECT
PROJECT = "cloud-training-demos" # Replace with your PROJECT
bq = bigquery.Client(project = PROJECT)
modulo_divisor = 100
train_percent = 80.0
eval_percent = 10.0
train_buckets = int(modulo_divisor * train_percent / 100.0)
eval_buckets = int(modulo_divisor * eval_percent / 100.0)
def display_dataframe_head_from_query(query, count=10):
    """Displays count rows from dataframe head from query.

    Args:
        query: str, query to be run on BigQuery, results stored in dataframe.
        count: int, number of results from head of dataframe to display.

    Returns:
        Dataframe head with count number of results.
    """
df = bq.query(
query + " LIMIT {limit}".format(
limit=count)).to_dataframe()
return df.head(count)
# Get label, features, and columns to hash and split into buckets
hash_cols_fixed_query = """
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks,
year,
month,
CASE
WHEN day IS NULL THEN
CASE
WHEN wday IS NULL THEN 0
ELSE wday
END
ELSE day
END AS date,
IFNULL(state, "Unknown") AS state,
IFNULL(mother_birth_state, "Unknown") AS mother_birth_state
FROM
publicdata.samples.natality
WHERE
year > 2000
AND weight_pounds > 0
AND mother_age > 0
AND plurality > 0
    AND gestation_weeks > 0
"""
display_dataframe_head_from_query(hash_cols_fixed_query)
data_query = """
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks,
FARM_FINGERPRINT(
CONCAT(
CAST(year AS STRING),
CAST(month AS STRING),
CAST(date AS STRING),
CAST(state AS STRING),
CAST(mother_birth_state AS STRING)
)
) AS hash_values
FROM
({CTE_hash_cols_fixed})
""".format(CTE_hash_cols_fixed=hash_cols_fixed_query)
display_dataframe_head_from_query(data_query)
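# Plain-Python sketch of the same splitting rule (the hash values below are made
# up; FARM_FINGERPRINT produces the real ones). A record's bucket is
# ABS(hash) % modulo_divisor, and the bucket index alone decides its split.
for h in [123456789, -987654321, 42]:
    bucket = abs(h) % modulo_divisor
    split = ("train" if bucket < train_buckets
             else "eval" if bucket < train_buckets + eval_buckets
             else "test")
    print(h, bucket, split)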
# Get the counts of each of the unique hashes of our splitting column
first_bucketing_query = """
SELECT
hash_values,
COUNT(*) AS num_records
FROM
({CTE_data})
GROUP BY
hash_values
""".format(CTE_data=data_query)
display_dataframe_head_from_query(first_bucketing_query)
# Get the number of records in each of the hash buckets
second_bucketing_query = """
SELECT
ABS(MOD(hash_values, {modulo_divisor})) AS bucket_index,
SUM(num_records) AS num_records
FROM
({CTE_first_bucketing})
GROUP BY
ABS(MOD(hash_values, {modulo_divisor}))
""".format(
CTE_first_bucketing=first_bucketing_query, modulo_divisor=modulo_divisor)
display_dataframe_head_from_query(second_bucketing_query)
# Calculate the overall percentages
percentages_query = """
SELECT
bucket_index,
num_records,
CAST(num_records AS FLOAT64) / (
SELECT
SUM(num_records)
FROM
({CTE_second_bucketing})) AS percent_records
FROM
({CTE_second_bucketing})
""".format(CTE_second_bucketing=second_bucketing_query)
display_dataframe_head_from_query(percentages_query)
# Choose hash buckets for training and pull in their statistics
train_query = """
SELECT
*,
"train" AS dataset_name
FROM
({CTE_percentages})
WHERE
bucket_index >= 0
AND bucket_index < {train_buckets}
""".format(
CTE_percentages=percentages_query,
train_buckets=train_buckets)
display_dataframe_head_from_query(train_query)
# Choose hash buckets for validation and pull in their statistics
eval_query = """
SELECT
*,
"eval" AS dataset_name
FROM
({CTE_percentages})
WHERE
bucket_index >= {train_buckets}
AND bucket_index < {cum_eval_buckets}
""".format(
CTE_percentages=percentages_query,
train_buckets=train_buckets,
cum_eval_buckets=train_buckets + eval_buckets)
display_dataframe_head_from_query(eval_query)
# Choose hash buckets for testing and pull in their statistics
test_query = """
SELECT
*,
"test" AS dataset_name
FROM
({CTE_percentages})
WHERE
bucket_index >= {cum_eval_buckets}
AND bucket_index < {modulo_divisor}
""".format(
CTE_percentages=percentages_query,
cum_eval_buckets=train_buckets + eval_buckets,
modulo_divisor=modulo_divisor)
display_dataframe_head_from_query(test_query)
# Union the training, validation, and testing dataset statistics
union_query = """
SELECT
0 AS dataset_id,
*
FROM
({CTE_train})
UNION ALL
SELECT
1 AS dataset_id,
*
FROM
({CTE_eval})
UNION ALL
SELECT
2 AS dataset_id,
*
FROM
({CTE_test})
""".format(CTE_train=train_query, CTE_eval=eval_query, CTE_test=test_query)
display_dataframe_head_from_query(union_query)
# Show final splitting and associated statistics
split_query = """
SELECT
dataset_id,
dataset_name,
SUM(num_records) AS num_records,
SUM(percent_records) AS percent_records
FROM
({CTE_union})
GROUP BY
dataset_id,
dataset_name
ORDER BY
dataset_id
""".format(CTE_union=union_query)
display_dataframe_head_from_query(split_query)
# every_n allows us to subsample from each of the hash values
# This helps us get approximately the record counts we want
every_n = 1000
splitting_string = "ABS(MOD(hash_values, {0} * {1}))".format(every_n, modulo_divisor)
def create_data_split_sample_df(query_string, splitting_string, lo, up):
    """Creates a dataframe with a sample of a data split.

    Args:
        query_string: str, query to run to generate splits.
        splitting_string: str, modulo string to split by.
        lo: float, lower bound for bucket filtering for split.
        up: float, upper bound for bucket filtering for split.

    Returns:
        Dataframe containing data split sample.
    """
query = "SELECT * FROM ({0}) WHERE {1} >= {2} and {1} < {3}".format(
query_string, splitting_string, int(lo), int(up))
df = bq.query(query).to_dataframe()
return df
train_df = create_data_split_sample_df(
data_query, splitting_string,
lo=0, up=train_percent)
eval_df = create_data_split_sample_df(
data_query, splitting_string,
lo=train_percent, up=train_percent + eval_percent)
test_df = create_data_split_sample_df(
data_query, splitting_string,
lo=train_percent + eval_percent, up=modulo_divisor)
print("There are {} examples in the train dataset.".format(len(train_df)))
print("There are {} examples in the validation dataset.".format(len(eval_df)))
print("There are {} examples in the test dataset.".format(len(test_df)))
train_df.head()
train_df.describe()
def preprocess(df):
    """Preprocess pandas dataframe for augmented babyweight data.

    Args:
        df: Dataframe containing raw babyweight data.

    Returns:
        Pandas dataframe containing preprocessed raw babyweight data as well
        as simulated no ultrasound data masking some of the original data.
    """
# Clean up raw data
    # Filter out what we don't want to use for training
df = df[df.weight_pounds > 0]
df = df[df.mother_age > 0]
df = df[df.gestation_weeks > 0]
df = df[df.plurality > 0]
# Modify plurality field to be a string
twins_etc = dict(zip([1,2,3,4,5],
["Single(1)",
"Twins(2)",
"Triplets(3)",
"Quadruplets(4)",
"Quintuplets(5)"]))
df["plurality"].replace(twins_etc, inplace=True)
# Clone data and mask certain columns to simulate lack of ultrasound
no_ultrasound = df.copy(deep=True)
# Modify is_male
no_ultrasound["is_male"] = "Unknown"
# Modify plurality
condition = no_ultrasound["plurality"] != "Single(1)"
no_ultrasound.loc[condition, "plurality"] = "Multiple(2+)"
# Concatenate both datasets together and shuffle
return pd.concat(
[df, no_ultrasound]).sample(frac=1).reset_index(drop=True)
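# Tiny illustrative check (made-up values): one twins record yields two rows,
# the original plus a masked clone with is_male="Unknown" and
# plurality="Multiple(2+)".
toy = pd.DataFrame({"weight_pounds": [7.5], "is_male": [True], "mother_age": [30.0],
                    "plurality": [2], "gestation_weeks": [39]})
print(preprocess(toy))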
train_df = preprocess(train_df)
eval_df = preprocess(eval_df)
test_df = preprocess(test_df)
train_df.head()
train_df.tail()
train_df.describe()
# Define columns
columns = ["weight_pounds",
"is_male",
"mother_age",
"plurality",
"gestation_weeks"]
# Write out CSV files
train_df.to_csv(
path_or_buf="train.csv", columns=columns, header=False, index=False)
eval_df.to_csv(
path_or_buf="eval.csv", columns=columns, header=False, index=False)
test_df.to_csv(
path_or_buf="test.csv", columns=columns, header=False, index=False)
%%bash
wc -l *.csv
%%bash
head *.csv
%%bash
tail *.csv
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import necessary libraries.
Step2: Set environment variables so that we can use them throughout the entire lab. We will be using our project name for our bucket, so you only need to change your project and region.
Step3: Create ML datasets by sampling using BigQuery
Step4: We need to figure out the right way to divide our hash values to get our desired splits. To do that we need to define some values to hash with in the modulo. Feel free to play around with these values to get the perfect combination.
Step6: We can make a series of queries to check if our bucketing values result in the correct sizes of each of our dataset splits and then adjust accordingly. Therefore, to make our code more compact and reusable, let's define a function to return the head of a dataframe produced from our queries up to a certain number of rows.
Step8: For our first query, we're going to use the original query above to get our label, features, and columns to combine into our hash which we will use to perform our repeatable splitting. There are only a limited number of years, months, days, and states in the dataset. Let's see what the hash values are. We will need to include all of these extra columns to hash on to get a fairly uniform spread of the data. Feel free to try less or more in the hash and see how it changes your results.
Step10: Using COALESCE would provide the same result as the nested CASE WHEN. This is preferable when all we want is the first non-null value. To be precise, the CASE WHEN would become COALESCE(day, wday, 0) AS date. You can read more about it here.
Step12: The next query is going to find the counts of each of the unique 657484 hash_values. This will be our first step at making actual hash buckets for our split via the GROUP BY.
Step14: The query below performs a second layer of bucketing where now for each of these bucket indices we count the number of records.
Step16: The number of records is hard for us to easily understand the split, so we will normalize the count into percentage of the data in each of the hash buckets in the next query.
Step18: We'll now select the range of buckets to be used in training.
Step20: We'll do the same by selecting the range of buckets to be used evaluation.
Step22: Lastly, we'll select the hash buckets to be used for the test split.
Step24: In the below query, we'll UNION ALL all of the datasets together so that all three sets of hash buckets will be within one table. We added dataset_id so that we can sort on it in the query after.
Step26: Lastly, we'll show the final split between train, eval, and test sets. We can see both the number of records and percent of the total data. It is really close to the 80/10/10 that we were hoping to get.
Step28: Now that we know that our splitting values produce a good global splitting on our data, here's a way to get a well-distributed portion of the data in such a way that the train/eval/test sets do not overlap and takes a subsample of our global splits.
Step29: Preprocess data using Pandas
Step30: Also, notice that there are some very important numeric fields that are missing in some rows (the count in Pandas doesn't count missing data)
Step32: It is always crucial to clean raw data before using it in machine learning, so we have a preprocessing step. We'll define a preprocess function below. Note that the mother's age is an input to our model, so users will have to provide it; otherwise, our service won't work. The features we use for our model were chosen because they are such good predictors and because they are easy enough to collect.
Step33: Let's process the train/eval/test set and see a small sample of the training data after our preprocessing
Step34: Let's look again at a summary of the dataset. Note that we only see numeric columns, so plurality does not show up.
Step35: Write to .csv files
|
4,319
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License")
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
!pip install --quiet -U apache-beam
import apache_beam as beam
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
'🍓Strawberry 🥕Carrot 🍆Eggplant',
'🍅Tomato 🥔Potato',
])
| 'Split words' >> beam.FlatMap(str.split)
| beam.Map(print)
)
import apache_beam as beam
def split_words(text):
return text.split(',')
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
'🍓Strawberry,🥕Carrot,🍆Eggplant',
'🍅Tomato,🥔Potato',
])
| 'Split words' >> beam.FlatMap(split_words)
| beam.Map(print)
)
import apache_beam as beam
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
['🍓Strawberry', '🥕Carrot', '🍆Eggplant'],
['🍅Tomato', '🥔Potato'],
])
| 'Flatten lists' >> beam.FlatMap(lambda elements: elements)
| beam.Map(print)
)
import apache_beam as beam
def generate_elements(elements):
for element in elements:
yield element
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
['🍓Strawberry', '🥕Carrot', '🍆Eggplant'],
['🍅Tomato', '🥔Potato'],
])
| 'Flatten lists' >> beam.FlatMap(generate_elements)
| beam.Map(print)
)
import apache_beam as beam
def format_plant(icon, plant):
if icon:
yield '{}{}'.format(icon, plant)
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
('🍓', 'Strawberry'),
('🥕', 'Carrot'),
('🍆', 'Eggplant'),
('🍅', 'Tomato'),
('🥔', 'Potato'),
(None, 'Invalid'),
])
| 'Format' >> beam.FlatMapTuple(format_plant)
| beam.Map(print)
)
import apache_beam as beam
def split_words(text, delimiter=None):
return text.split(delimiter)
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
'🍓Strawberry,🥕Carrot,🍆Eggplant',
'🍅Tomato,🥔Potato',
])
| 'Split words' >> beam.FlatMap(split_words, delimiter=',')
| beam.Map(print)
)
import apache_beam as beam
with beam.Pipeline() as pipeline:
delimiter = pipeline | 'Create delimiter' >> beam.Create([','])
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
'🍓Strawberry,🥕Carrot,🍆Eggplant',
'🍅Tomato,🥔Potato',
])
| 'Split words' >> beam.FlatMap(
lambda text, delimiter: text.split(delimiter),
delimiter=beam.pvalue.AsSingleton(delimiter),
)
| beam.Map(print)
)
import apache_beam as beam
def normalize_and_validate_durations(plant, valid_durations):
plant['duration'] = plant['duration'].lower()
if plant['duration'] in valid_durations:
yield plant
with beam.Pipeline() as pipeline:
valid_durations = pipeline | 'Valid durations' >> beam.Create([
'annual',
'biennial',
'perennial',
])
valid_plants = (
pipeline
| 'Gardening plants' >> beam.Create([
{'icon': '🍓', 'name': 'Strawberry', 'duration': 'Perennial'},
{'icon': '🥕', 'name': 'Carrot', 'duration': 'BIENNIAL'},
{'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'},
{'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'},
{'icon': '🥔', 'name': 'Potato', 'duration': 'unknown'},
])
| 'Normalize and validate durations' >> beam.FlatMap(
normalize_and_validate_durations,
valid_durations=beam.pvalue.AsIter(valid_durations),
)
| beam.Map(print)
)
import apache_beam as beam
def replace_duration_if_valid(plant, durations):
if plant['duration'] in durations:
plant['duration'] = durations[plant['duration']]
yield plant
with beam.Pipeline() as pipeline:
durations = pipeline | 'Durations dict' >> beam.Create([
(0, 'annual'),
(1, 'biennial'),
(2, 'perennial'),
])
valid_plants = (
pipeline
| 'Gardening plants' >> beam.Create([
{'icon': '🍓', 'name': 'Strawberry', 'duration': 2},
{'icon': '🥕', 'name': 'Carrot', 'duration': 1},
{'icon': '🍆', 'name': 'Eggplant', 'duration': 2},
{'icon': '🍅', 'name': 'Tomato', 'duration': 0},
{'icon': '🥔', 'name': 'Potato', 'duration': -1},
])
| 'Replace duration if valid' >> beam.FlatMap(
replace_duration_if_valid,
durations=beam.pvalue.AsDict(durations),
)
| beam.Map(print)
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: FlatMap
Step2: Examples
Step3: <table align="left" style="margin-right
Step4: <table align="left" style="margin-right
Step5: <table align="left" style="margin-right
Step6: <table align="left" style="margin-right
Step7: <table align="left" style="margin-right
Step8: <table align="left" style="margin-right
Step9: <table align="left" style="margin-right
Step10: <table align="left" style="margin-right
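One way to see the point of the examples above side by side (my own sketch, not from the Beam docs): Map emits exactly one output element per input, so splitting with Map yields whole lists, while FlatMap unpacks each returned iterable into separate elements.
import apache_beam as beam

with beam.Pipeline() as pipeline:
    lines = pipeline | beam.Create(['🍓Strawberry 🥕Carrot', '🍅Tomato'])
    # Map: one output per input, so each line becomes a single list element.
    lines | 'Map split' >> beam.Map(str.split) | 'Print lists' >> beam.Map(print)
    # FlatMap: the returned list is flattened, so each word is its own element.
    lines | 'FlatMap split' >> beam.FlatMap(str.split) | 'Print words' >> beam.Map(print)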
|
4,320
|
<ASSISTANT_TASK:>
Python Code:
# the output of plotting commands is displayed inline within frontends,
# directly below the code cell that produced it
%matplotlib inline
from __future__ import print_function
# this python library provides generic shallow (copy) and deep copy (deepcopy) operations
from copy import deepcopy
# import from Ocelot graphical modules
import sys, os
from ocelot import *
from ocelot.utils.xfel_utils import *
from ocelot.gui.accelerator import *
from ocelot.gui.genesis_plot import *
#from ocelot.optics.elements import Filter_freq
import numpy as np
from copy import copy
#import matplotlib.pyplot as plt
# load beam distribution
E_beam=8.5 #[GeV]
E_photon=250 #[eV]
# defining the undulator
lperiod=0.068
nperiods=73
und = Undulator(lperiod=lperiod, nperiods=nperiods, Kx=1.0);
und.Kx = Ephoton2K(E_photon, und.lperiod, E_beam)
# defining of the drifts
d2 = Drift (l=4*und.lperiod)
d3 = Drift (l=7*und.lperiod)
# defining of the quads
qf = Quadrupole (l=6*und.lperiod, k1=-7.3)
qd = Quadrupole (l=6*und.lperiod, k1=7.3)
qdh=deepcopy(qd)
qdh.l/=2
# creating of the cell
extra_fodo = (und, d2, qdh)
cell_ps = (und, d2, qf, d3, und, d2, qd, d3)
l_fodo= MagneticLattice(cell_ps).totalLen/2
sase3 = MagneticLattice((und, d2, qd, d3) + 11*cell_ps)
up = UndulatorParameters(und,E_beam)
up.printParameters()
beamf = read_beam_file('beam.dat')
fig=plt.figure()
fig.set_size_inches((20,15))
plot_beam(fig, beamf)
beta_av = 20.0
beam=get_beam_peak(beamf)
beam.E=E_beam
rematch(beta_av, l_fodo, qdh, sase3, extra_fodo, beam, qf, qd)
beamf = transform_beam_file(beamf ,transform = [ [beam.beta_x,beam.alpha_x], [beam.beta_y,beam.alpha_y] ], energy_new = beam.E, emit_scale = 1.0)
beamf = cut_beam(beamf,[-2e-6, 2e-6])
fig=plt.figure()
fig.set_size_inches((20,15))
plot_beam(fig, beamf)
def f1(n, n0, a0, a1, a2):
'''
piecewise-quadratic tapering function
'''
for i in xrange(1,len(n0)):
if n < n0[i]:
return a0 + (n-n0[i-1])*a1[i-1] + (n-n0[i-1])**2 * a2[i-1]
a0 += (n0[i]-n0[i-1])*a1[i-1] + (n0[i]-n0[i-1])**2 * a2[i-1]
return 1.0
tap_start=3 #number of undulators
lin_tap=0.01 #taper step
quad_tap=0.0
n = 60
n0 = [0,tap_start,60]
a0 = und.Kx
a1 = [0,lin_tap*a0]
a2 = [0,quad_tap]
taper_func1 = lambda n : f1(n, n0, a0, a1, a2)
sase3= taper(sase3, taper_func1)
run_dir = 'gen_stst' #directory to dump data
run_id=0 # run number (subdirectory 'run_#') for statistical studies
try:
os.makedirs(run_dir)
except:
pass
launcher = get_genesis_launcher('genesis') # launcher object to start genesis
inp = generate_input(up, beam, itdp=False)
inp.lattice_str = generate_lattice(sase3, unit = up.lw, energy = beam.E) #generate Genesis lattice based on Ocelot lattice object
inp.beam_file_str = beam_file_str(beamf)
#inp.beamfile = 'tmp.beam'
inp.runid = run_id
inp.run_dir = run_dir
inp.ipseed = 17111+7*run_id # defines shot-noise, changes automatically
# below other Genesis parameters may be specified, like prad0, dgrid, etc.....
print(inp.input())
print(inp.lattice_str)
print(inp.beam_file_str)
#g = run(inp,launcher)
exp_dir='/some_directory'
run_number=10
run_ids = xrange(0,run_number)
start_stage = 1
stop_stage = 4
# set simulation parameters
# prepare electron beam file
stage=1
if start_stage <= stage and stop_stage >= stage:
for run_id in run_ids:
run_dir = exp_dir + 'run_' + str(run_id)
#prepare input, specify parameters
#inp.ipseed = 17111*(run_id + 1)
#
#
#
#g = run(inp,launcher)
print('run #',run_id, ' of stage ',stage)
stage=2
if start_stage <= stage and stop_stage >= stage:
for run_id in run_ids:
run_dir = exp_dir + 'run_' + str(run_id)
#prepare input based on stage 1 output
#inp.ipseed = 27222*(run_id + 1)
#
#inp.distfile = 'run.'+ str(inp.runid)+'.s1.gout.dist'
#
#g = run(inp,launcher)
print('run #',run_id, ' of stage ',stage)
stage=3
if start_stage <= stage and stop_stage >= stage:
for run_id in run_ids:
run_dir = exp_dir + 'run_' + str(run_id)
#prepare input based on stage 1 output
#inp.ipseed = 37333*(run_id + 1)
#
#inp.distfile = 'run.'+ str(inp.runid)+'.s1.gout.dist'
#
#g = run(inp,launcher)
print('run #',run_id, ' of stage ',stage)
#stage=n .........
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setting input parameters
Step2: Creating SASE3 lattice
Step3: Load beam file
Step4: Plot beamfile
Step5: Match beam file
Step6: Tapering the undulator (the taper law is written out after this list)
Step7: specify the run_dir - directory into which the experimental results will be saved
Step8: generate Genesis input object
Step9: now all the genesis input files are created, such as lattice file, beam file, input file.
Step10: Genesis may be executed with the following command
Step11: if the "run" function is placed in a script, the following post-processing code will be executed after the GENESIS simulation is finished
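For reference, the piecewise-quadratic taper implemented by f1 above evaluates, on each segment $n_{i-1} \le n < n_i$ (my rendering of what the code computes):
$$K(n) = A_{i-1} + (n - n_{i-1})\,a_{1,i-1} + (n - n_{i-1})^2\,a_{2,i-1}$$
where $A_{i-1}$ is the value accumulated from the previous segment endpoints, starting from $a_0$.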
|
4,321
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
from thinkbayes2 import Pmf, Suite
from thinkbayes2 import MakeMixture
from fractions import Fraction
d1 = Pmf({'Red':Fraction(2), 'Blue':Fraction(4)}, label='d1 (bluish) ')
d1.Print()
d2 = Pmf({'Red':Fraction(4), 'Blue':Fraction(2)}, label='d2 (reddish)')
d2.Print()
dice = Pmf({d1:Fraction(1), d2:Fraction(1)})
dice.Print()
dice.Random().Random()
def rollA(dice):
while True:
die = dice.Random()
roll = die.Random()
yield roll
iterA = rollA(dice)
for i in range(5):
print(next(iterA))
Pmf(next(iterA) for i in range(1000))
MakeMixture(dice).Print()
class Dice(Suite):
def Likelihood(self, data, hypo):
        """
        data: 'Red' or 'Blue'
        hypo: a Die object
        """
return hypo[data]
prior = Dice({d1:Fraction(1), d2:Fraction(1)})
prior.Print()
posterior = prior.Copy()
posterior.Update('Red')
posterior.Print()
predictive = MakeMixture(prior)
predictive.Print()
def rollB(dice):
die = dice.Random()
while True:
roll = die.Random()
yield roll
iterB = rollB(dice)
Pmf(next(iterB) for i in range(1000))
posterior = prior.Copy()
posterior.Update('Red')
posterior.Print()
predictive = MakeMixture(posterior)
predictive.Print()
posterior.Update('Red')
posterior.Print()
predictive = MakeMixture(posterior)
predictive.Print()
posterior = prior.Copy()
for i in range(10):
posterior.Update(next(iterB))
posterior.Print()
predictive = MakeMixture(posterior)
predictive.Print()
def rollC(dice):
while True:
die = dice.Random()
roll = die.Random()
if roll == 'Red':
yield roll
iterC = rollC(dice)
Pmf(next(iterC) for i in range(1000))
posterior = prior.Copy()
posterior[d1] *= 1
posterior[d2] *= 2
posterior.Normalize()
posterior.Print()
predictive = Pmf({'Red':1.0})
predictive.Print()
def rollD(dice):
die = dice.Random()
while True:
roll = die.Random()
if roll == 'Red':
yield roll
iterD = rollD(dice)
Pmf(next(iterD) for i in range(1000))
posterior = prior.Copy()
posterior.Print()
predictive = Pmf({'Red':1.0})
class ScenarioD(Suite):
def Likelihood(self, data, hypo):
        """
        data: k, number of times I rolled to get a Red
        hypo: a Die object
        """
p = hypo['Red']
k = data
return (1-p)**(k-1) * p
suite = ScenarioD([d1, d2])
suite.Update(1)
suite.Print()
suite = ScenarioD([d1, d2])
suite.Update(2)
suite.Print()
suite = ScenarioD([d1, d2])
suite.Update(3)
suite.Print()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A dice problem
Step2: And I'll make another Pmf to represent the random choice of one die or the other.
Step3: Now I can use the Random method to choose a die and then roll it.
Step4: Scenario A
Step5: We can use this generator to simulate rolls
Step6: In the long run, the proportion of red and blue is 50-50.
Step7: We can see that by computing the weighted mixture of the two dice
Step9: To answer the original question, I'll create a suite of hypotheses where each hypothesis is represented by a die, and the likelihood of the data under each hypothesis is the probability that the given die yields the given outcome.
Step10: Now I can create a suite that represents the prior distribution.
Step11: And update it with the data.
Step12: The posterior probability for d1 is 1/3 and the posterior probability for d2 is 2/3.
Step13: Scenario B
Step14: In the long run, the proportion of red is either 1/3 or 2/3, not 1/2
Step15: After the first roll, the posterior suite is the same as in the previous scenario
Step16: In this scenario, we know we are going to roll the same die each time, so the information we learned from the first roll informs our prediction for the second.
Step17: Having seen one red, we are more inclined to believe that I am rolling d2, so we are more inclined to predict that I will roll red again.
Step18: If we continue this process, we will eventually be confident that we know which die is being rolled
Step19: And the predictive distribution will be close to 1/3 or 2/3, depending on which die we think it is.
Step20: Scenario C
Step21: In this scenario, obviously, the outcome is always red
Step22: But now suppose I ask you about the last die I rolled. What is the probability that it is the reddish die, d2?
Step23: That's the same posterior we saw in Scenarios A and B. So even though we knew the outcome would be red, we still learn something about the die.
Step24: Scenario D
Step25: Again, obviously, the outcome is always red
Step26: But now the probability of getting red is the same regardless of which die I chose. On average, it takes longer to get to red if I chose d1, but if I only tell you the outcome and don't tell you how many times I rolled, you have no way of knowing which die I chose (a simulation sketch appears after this list).
Step27: If you are not sure about that argument (and after all this I don't blame you), see below for a more persuasive argument.
Step29: Summary
Step30: The likelihood is the geometric PMF with probability p (written out after this list).
Step31: If you know I got it on the second try, that's equally likely with d1 or d2
Step32: If it takes three tries or more, that's evidence for d1.
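Two quick add-ons for Scenario D (my own, not from the original notebook). The likelihood that ScenarioD implements is the geometric PMF, $P(K=k \mid p) = (1-p)^{k-1}\,p$, and a small simulation confirms Step26: conditioning only on "a red eventually appeared" leaves the choice of die at 50-50.
from __future__ import division  # the notebook above targets Python 2
import random

def die_given_eventual_red():
    # Pick a die uniformly: d1 has P(Red) = 1/3, d2 has P(Red) = 2/3.
    p = 1/3 if random.random() < 0.5 else 2/3
    while random.random() >= p:  # keep rolling until a red comes up
        pass
    return p

trials = [die_given_eventual_red() for _ in range(100000)]
print(sum(1 for p in trials if p == 1/3) / len(trials))  # ~0.5: red alone identifies nothing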
|
4,322
|
<ASSISTANT_TASK:>
Python Code:
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import sys
%matplotlib notebook
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data(path="mnist.npz")
x_train.shape
y_train.shape
class GAN:
def __init__(self, noise_input_tensor, image_input_tensor, generator_hidden_dim, discriminator_hidden_dim):
self._noise_input = noise_input_tensor
self._image_input = image_input_tensor
with tf.variable_scope("generator"):
self._generator_output, self._generator_parameters = self._fnn(
noise_input_tensor, generator_hidden_dim, image_input_tensor.shape[1],
activation=lambda x: 255 * tf.sigmoid(x)) # (0, 1) -> (0.0, 255.0)
with tf.variable_scope("discriminator"):
self._discriminator_output_for_real_data, self._discriminator_parameters = self._fnn(
image_input_tensor, discriminator_hidden_dim, 1, activation=tf.sigmoid)
with tf.variable_scope("discriminator", reuse=True): # Share weights and biases
            self._discriminator_output_for_synth, _ = self._fnn(self._generator_output,
                                                                 discriminator_hidden_dim,
                                                                 1,
                                                                 activation=tf.sigmoid)
def _fnn(self, input_tensor, hidden_dim, output_dim, activation=None):
w_xh = tf.get_variable(initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01),
shape=[input_tensor.shape[1], hidden_dim], name="W_xh")
b_xh = tf.get_variable(initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01),
shape=[hidden_dim], name="b_xh")
hidden = tf.nn.relu(tf.add(tf.matmul(input_tensor, w_xh), b_xh))
w_ho = tf.get_variable(initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01),
shape=[hidden_dim, output_dim], name="W_ho")
b_ho = tf.get_variable(initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01),
shape=[output_dim], name="b_ho")
output = tf.add(tf.matmul(hidden, w_ho), b_ho)
if activation is None:
return output, (w_xh, b_xh, w_ho, b_ho)
else:
return activation(output), (w_xh, b_xh, w_ho, b_ho)
@property
def noise_input(self):
return self._noise_input
@property
def image_input(self):
return self._image_input
@property
def generator_output(self):
return self._generator_output
@property
def discriminator_output_from_generator(self):
return self._discriminator_output_for_synth
@property
def discriminator_output_from_image_input(self):
return self._discriminator_output_for_real_data
@property
def g_param(self):
return self._generator_parameters[:]
@property
def d_param(self):
return self._discriminator_parameters[:]
noise_dim = 128
image_dim = x_train.shape[1] * x_train.shape[2]
generator_hidden_dim = 256
discriminator_hidden_dim = 256
tf.reset_default_graph()
with tf.variable_scope("GAN"):
generator_input = tf.placeholder(shape=[None, noise_dim], dtype=tf.float32, name="generator_input")
discriminator_input = tf.placeholder(shape=[None, image_dim], dtype=tf.float32, name="discriminator_input_real")
gan = GAN(generator_input, discriminator_input, generator_hidden_dim, discriminator_hidden_dim)
generator_loss = -tf.reduce_mean(tf.log(gan.discriminator_output_from_generator), name="generator_loss")
discriminator_loss = -tf.reduce_mean(
tf.log(gan.discriminator_output_from_image_input)+tf.log(1.0-gan.discriminator_output_from_generator),
name="discriminator_loss")
with tf.variable_scope("training_configuration"):
g_learing_rate_tensor = tf.get_variable(dtype=tf.float32, initializer=0.0, name="generator_lr")
d_learing_rate_tensor = tf.get_variable(dtype=tf.float32, initializer=0.0, name="discriminator_lr")
global_step = tf.get_variable(dtype=tf.int32, shape=[], name="global_step", trainable=False)
g_train_op = tf.train.AdamOptimizer(learning_rate=g_learing_rate_tensor).minimize(generator_loss,
var_list=gan.g_param,
global_step=global_step)
d_train_op = tf.train.AdamOptimizer(learning_rate=d_learing_rate_tensor).minimize(discriminator_loss,
var_list=gan.d_param, global_step=global_step)
summary_gen_loss = tf.summary.scalar(tensor=generator_loss, name="generator_loss_summary")
summary_dis_loss = tf.summary.scalar(tensor=discriminator_loss, name="discriminator_loss_summary")
summary_all = tf.summary.merge_all()
with tf.variable_scope("training_configuration", auxiliary_name_scope=False): # Re-entering the name scope
batch_size_t = tf.placeholder(dtype=tf.int64, shape=[], name="batch_size")
training_set = tf.data.Dataset.from_tensor_slices((x_train, y_train))
training_set = training_set.batch(batch_size=batch_size_t).map(
lambda x, y: (tf.reshape(tensor=x, shape=[-1, 28 * 28]), y))
batch_iter_train = training_set.make_initializable_iterator()
next_batch_train = batch_iter_train.get_next()
n_epoch = 100
batch_size = 50
g_lr, d_lr = 2e-5, 2e-5
lr_decay = 0.97
num_batch = int(x_train.shape[0]/batch_size)
k = 1
model_param_path = os.path.normpath(r"./model_checkpoints")
saver=tf.train.Saver()
def batch_sampler(batch_size):
return np.random.uniform(-5.0, 5.0, size=[batch_size, noise_dim])
with tf.Session() as sess, tf.summary.FileWriter(logdir=model_param_path) as writer:
writer.add_graph(graph=tf.get_default_graph())
sess.run(tf.global_variables_initializer())
for epoch in range(n_epoch):
sess.run([tf.assign(g_learing_rate_tensor, g_lr), tf.assign(d_learing_rate_tensor, d_lr)])
sess.run(batch_iter_train.initializer, feed_dict={batch_size_t: batch_size})
for batch_no in range(num_batch):
x, _ = sess.run(next_batch_train)
# print(x.shape)
# x = x.reshape([-1, image_dim]) # flatten each sample manually
# Train the discriminator network k times
for _ in range(k):
noise_batch = batch_sampler(batch_size)
feed_dict = {gan.image_input: x, gan.noise_input: noise_batch}
sess.run(d_train_op, feed_dict=feed_dict)
# Train the generator network once
noise_batch = batch_sampler(batch_size)
feed_dict = {gan.image_input: x, gan.noise_input: noise_batch}
_, summary, step = sess.run([g_train_op, summary_all, global_step], feed_dict=feed_dict)
writer.add_summary(summary=summary, global_step=step)
g_lr = g_lr * lr_decay
d_lr = d_lr * lr_decay
saver.save(sess=sess, save_path=os.path.join(model_param_path, "GAN"))
print("Done!")
n = 10
canvas = np.empty((28 * n, 28 * n))
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(model_param_path)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
for i in range(n):
# Noise input.
z = batch_sampler(n)
# Generate image from noise.
g = sess.run(gan.generator_output, feed_dict={gan.noise_input: z})
# Reverse colours for better display
# g = -1 * (g - 1)
g = (-1 * (g - 255)).astype(np.int32)
for j in range(n):
# Draw the generated digits
canvas[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = g[j].reshape([28, 28])
plt.figure(figsize=(n, n))
plt.imshow(canvas, origin="upper", cmap="gray")
plt.show()
else:
print("Failed to load model checkpoint.")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Data Set
Step2: Verify data structure
Step3: Define model
Step4: Specify Dimensions
Step5: Create the GAN model; define I/O, loss functions, and optimizers.
Step6: Prepare Dataset and Start Training
Step7: Test Network
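For reference, the two losses constructed in the model cell above are the standard GAN objectives; in math form (my rendering of exactly what the code computes):
$$\mathcal{L}_D = -\,\mathbb{E}_{x}\big[\log D(x)\big] - \mathbb{E}_{z}\big[\log\big(1 - D(G(z))\big)\big], \qquad \mathcal{L}_G = -\,\mathbb{E}_{z}\big[\log D(G(z))\big]$$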
|
4,323
|
<ASSISTANT_TASK:>
Python Code:
2+2
print 2*3
print (4+6)*(2+9) # should calculate to 110
print 12.0/11.0
print(5/3) # Integer division gives a 'floor' value (rounding down, basically).
print(5.0/3.0) # Dividing floats (usually) gives the expected answer.
print(5.0/3) # The interpreter uses the more complex type to infer the type for the result.
print(5/3.0) # The order for type "upcasting" doesn't matter
0.1 + 0.2
a = 5
print a
y = x**2 - 3*x + 12 # just like in algebra, right?
x = 10
y = x**2 - 3*x + 12
print y
import numpy as np # the python array-math library
x = np.arange(0.0, 2*np.pi, 0.01) # make an array of numbers from 0 to 2π with a number every 0.01.
y = np.sin(x)
print "The length of x is: %s" % (len(x))
print "The length of y is: %s" % (len(y))
print "The first 5 values in the x array are:\n%s" % x[0:5]
print "The first 5 values in the y array are:\n%s" % y[0:5]
# this imports some plotting stuff we'll use
from bokeh.plotting import output_notebook
output_notebook()
from bokeh.plotting import figure, show
p = figure(title="Sine Example")
p.line(x, y)
show(p)
def f(x):
return x**2 - 3*x + 12
f(3)
print x[:5] # just to be clear that in the scope of this notebook, the symbol x is defined
print f(x)[:5] # only show the first 5 entries
p2 = figure()
p2.line(x, f(x))
p2_nbh = show(p2)
import random
def headache(name, number_of_repeats=5):
    """a pretty useless function"""
namelist = list(name)
for i in range(0, number_of_repeats):
random.shuffle(namelist)
for letter in namelist:
print letter,
headache("travis", 40)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A few more examples
Step2: One major difference between using a calculator and doing calculations on the computer is that there are a couple of types of numbers -- integers and floating point values. You can think of integers as whole numbers and floats (as floating point values are called) as supporting a decimal or fractional part of the value. This sometimes shows up as odd behavior with division (see the sketch after this list).
Step3: Let's look at another example of float math
Step4: Wat ?!?! There are occasionally precision issues because of the way floating point values work. It's actually an interesting abstraction (feel free to study a more detailed explanation of how IEEE Floats work). This is a good example of how abstractions can 'leak' (more on this later). A good explanation of how this affects Python is here. For our purposes, this really doesn't matter. Just a curiosity, so, moving right along... (the sketch after this list shows a tolerance-based comparison)
Step5: This may not seem very exciting at first, but variables are an important part of programming. It's good to know that you can use them in Python.
Step6: I know x isn't defined. Isn't that what a variable is? Not exactly... More on this error stuff later. Try this
Step7: So, the variables on the right side of the equals sign have to already be assigned a value. Otherwise, the interpreter tries to evaluate the right side and assign it to the left side.
Step8: That's cool. Now let's get back to something we tried earlier. Remember that expression y = x**2 - 3*x + 12 ? In algebra, that's actually a function. In Python (and just about every other language) we have the concept of functions as well. The keyword def is used to define a function in Python
Step9: So, now you can evaluate the function by handing it a value, or parameter (in this case, we've called it x).
Step10: So earlier, we assigned the value [0.0, 0.01, 0.02 ...] to the symbol x. (I'm starting to use the more official names for things here.) What happens if we pass that symbol into the function f?
Step11: %% boom %% Mind. Blown.
Step13: This is pretty math-y, what else can functions do?
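Two quick sketches (mine, not part of the original lesson) tying together the division and precision discussions above; they run unchanged on Python 2 and 3.
print(5 // 3)    # 1 -- explicit floor division, same result in Python 2 and 3
print(5 / 3.0)   # 1.666... -- force float math by making one operand a float

# Floating point sums are approximate; compare with a tolerance instead of ==.
a = 0.1 + 0.2
print(abs(a - 0.3) < 1e-9)  # True, even though a == 0.3 is False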
|
4,324
|
<ASSISTANT_TASK:>
Python Code:
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
view_sentence_range = (0, 10)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
    """
    Convert source and target text to proper word ids
    :param source_text: String that contains all the source text.
    :param target_text: String that contains all the target text.
    :param source_vocab_to_int: Dictionary to go from the source words to an id
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: A tuple of lists (source_id_text, target_id_text)
    """
# TODO: Implement Function
return None, None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_text_to_ids(text_to_ids)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
"""DON'T MODIFY ANYTHING IN THIS CELL"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) in [LooseVersion('1.0.0'), LooseVersion('1.0.1')], 'This project requires TensorFlow version 1.0 You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def model_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate, keep probability)
    """
# TODO: Implement Function
return None, None, None, None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_model_inputs(model_inputs)
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
    """
    Preprocess target data for decoding
    :param target_data: Target Placeholder
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param batch_size: Batch Size
    :return: Preprocessed target data
    """
# TODO: Implement Function
return None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_process_decoding_input(process_decoding_input)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
    """
    Create encoding layer
    :param rnn_inputs: Inputs for the RNN
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param keep_prob: Dropout keep probability
    :return: RNN state
    """
# TODO: Implement Function
return None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_encoding_layer(encoding_layer)
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob):
    """
    Create a decoding layer for training
    :param encoder_state: Encoder State
    :param dec_cell: Decoder RNN Cell
    :param dec_embed_input: Decoder embedded input
    :param sequence_length: Sequence Length
    :param decoding_scope: TensorFlow Variable Scope for decoding
    :param output_fn: Function to apply the output layer
    :param keep_prob: Dropout keep probability
    :return: Train Logits
    """
# TODO: Implement Function
return None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_decoding_layer_train(decoding_layer_train)
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):
    """
    Create a decoding layer for inference
    :param encoder_state: Encoder state
    :param dec_cell: Decoder RNN Cell
    :param dec_embeddings: Decoder embeddings
    :param start_of_sequence_id: GO ID
    :param end_of_sequence_id: EOS ID
    :param maximum_length: The maximum allowed time steps to decode
    :param vocab_size: Size of vocabulary
    :param decoding_scope: TensorFlow Variable Scope for decoding
    :param output_fn: Function to apply the output layer
    :param keep_prob: Dropout keep probability
    :return: Inference Logits
    """
# TODO: Implement Function
return None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_decoding_layer_infer(decoding_layer_infer)
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,
num_layers, target_vocab_to_int, keep_prob):
    """
    Create decoding layer
    :param dec_embed_input: Decoder embedded input
    :param dec_embeddings: Decoder embeddings
    :param encoder_state: The encoded state
    :param vocab_size: Size of vocabulary
    :param sequence_length: Sequence Length
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param keep_prob: Dropout keep probability
    :return: Tuple of (Training Logits, Inference Logits)
    """
# TODO: Implement Function
return None, None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_decoding_layer(decoding_layer)
def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):
    """
    Build the Sequence-to-Sequence part of the neural network
    :param input_data: Input placeholder
    :param target_data: Target placeholder
    :param keep_prob: Dropout keep probability placeholder
    :param batch_size: Batch Size
    :param sequence_length: Sequence Length
    :param source_vocab_size: Source vocabulary size
    :param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Encoder embedding size
    :param dec_embedding_size: Decoder embedding size
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: Tuple of (Training Logits, Inference Logits)
    """
# TODO: Implement Function
return None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_seq2seq_model(seq2seq_model)
# Number of Epochs
epochs = None
# Batch Size
batch_size = None
# RNN Size
rnn_size = None
# Number of Layers
num_layers = None
# Embedding Size
encoding_embedding_size = None
decoding_embedding_size = None
# Learning Rate
learning_rate = None
# Dropout Keep Probability
keep_probability = None
"""DON'T MODIFY ANYTHING IN THIS CELL"""
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob = model_inputs()
sequence_length = tf.placeholder_with_default(max_source_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(
tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)
tf.identity(inference_logits, 'logits')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
train_logits,
targets,
tf.ones([input_shape[0], sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import time
def get_accuracy(target, logits):
    """Calculate accuracy"""
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1]), (0,0)],
'constant')
return np.mean(np.equal(target, np.argmax(logits, 2)))
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = helper.pad_sentence_batch(source_int_text[:batch_size])
valid_target = helper.pad_sentence_batch(target_int_text[:batch_size])
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch) in enumerate(
helper.batch_data(train_source, train_target, batch_size)):
start_time = time.time()
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
sequence_length: target_batch.shape[1],
keep_prob: keep_probability})
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch, keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_source, keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)
end_time = time.time()
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
"""DON'T MODIFY ANYTHING IN THIS CELL"""
# Save parameters for checkpoint
helper.save_params(save_path)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
def sentence_to_seq(sentence, vocab_to_int):
    """
    Convert a sentence to a sequence of ids
    :param sentence: String
    :param vocab_to_int: Dictionary to go from the words to an id
    :return: List of word ids
    """
# TODO: Implement Function
return None
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_sentence_to_seq(sentence_to_seq)
translate_sentence = 'he saw a old yellow truck .'
"""DON'T MODIFY ANYTHING IN THIS CELL"""
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('logits:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Language Translation
Step3: Explore the Data
Step6: Implement Preprocessing Function
Step8: Preprocess all the data and save it
Step10: Check Point
Step12: Check the Version of TensorFlow and Access to GPU
Step15: Build the Neural Network
Step18: Process Decoding Input
Step21: Encoding
Step24: Decoding - Training
Step27: Decoding - Inference
Step30: Build the Decoding Layer
Step33: Build the Neural Network
Step34: Neural Network Training
Step36: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Sentence to Sequence
Step48: Translate
|
4,325
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf

def gen():
for i in range(10):
yield i
dataset = tf.data.Dataset.from_generator(gen, tf.float32)\
.make_one_shot_iterator()\
.get_next()
with tf.Session() as sess:
_data = sess.run(dataset)
print(_data)
with tf.Session() as sess:
for _ in range(10):
_data = sess.run(dataset)
print(_data)
# An end-of-sequence error occurs (tf.errors.OutOfRangeError)
with tf.Session() as sess:
for _ in range(12):
_data = sess.run(dataset)
print(_data)
def gen():
for i, j in zip(range(10, 20), range(10)):
yield (i, j)
dataset = tf.data.Dataset.from_generator(gen, (tf.float32, tf.float32))\
.make_one_shot_iterator()\
.get_next()
with tf.Session() as sess:
for _ in range(10):
_label, _feature = sess.run(dataset)
print(_label, _feature)
def gen():
for i, j in zip(range(10, 1010), range(1000)):
yield (i, j)
dataset = tf.data.Dataset.from_generator(gen, (tf.float32, tf.float32))\
.shuffle(7777)\
.batch(20)\
.make_one_shot_iterator()\
.get_next()
with tf.Session() as sess:
for _ in range(10):
_label, _feature = sess.run(dataset)
print(_label, _feature)
dataset = tf.data.TextLineDataset("./test_data.csv")\
.make_one_shot_iterator()\
.get_next()
with tf.Session() as sess:
_data = sess.run(dataset)
print(_data)
dataset = tf.data.TextLineDataset("./test_data.csv")\
.make_one_shot_iterator()\
.get_next()
lines = tf.decode_csv(dataset, record_defaults=[[0]]*10)
feature = tf.stack(lines[1:]) #, axis=1)
label = lines[0]
with tf.Session() as sess:
_fea, _lab = sess.run([feature, label])
print(_lab, _fea)
dataset = tf.data.TextLineDataset("./test_data.csv")\
.batch(2)\
.repeat(999999)\
.make_one_shot_iterator()\
.get_next()
lines = tf.decode_csv(dataset, record_defaults=[[0]]*10)
feature = tf.stack(lines[1:], axis=1)
label = tf.expand_dims(lines[0], axis=-1)
feature = tf.cast(feature, tf.float32)
label = tf.cast(label, tf.float32)
# must be defined as float for the computations below to work correctly
with tf.Session() as sess:
_fea, _lab = sess.run([feature, label])
for f, l in zip(_fea, _lab):
print(f, l)
layer1 = tf.layers.dense(feature, units=9, activation=tf.nn.relu)
layer2 = tf.layers.dense(layer1, units=9, activation=tf.nn.relu)
layer3 = tf.layers.dense(layer2, units=9, activation=tf.nn.relu)
layer4 = tf.layers.dense(layer3, units=9, activation=tf.nn.relu)
out = tf.layers.dense(layer4, units=1)
print("label's shape {}".format(label))
# label's shape (?,) : [1, 2, 3, 4, 5, 6]
# the computation fails if the dtype is int
print("out's shape {}".format(out))
# [[1], [2], [3], [4], [5], [6]]
loss = tf.losses.sigmoid_cross_entropy(label, out)
train_op = tf.train.GradientDescentOptimizer(1e-2).minimize(loss)
pred = tf.nn.sigmoid(out)
accuracy = tf.metrics.accuracy(label, tf.round(pred))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
for i in range(30):
_, _loss, _acc = sess.run([train_op, loss, accuracy])
print("step: {}, loss: {}, accuracy: {}".format(i, _loss, _acc))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If you want the generator to yield both the label and the feature
Step2: If you want minibatching
Step3: (2) TextLineDataset
Step4: b'1,1,2,3,4,5,6,7,8,9'
Step5: Modeling
Step6: Define the loss and the optimizer
Step7: Shapes (?, 1) and (?,) are incompatible error (see the sketch after this list)
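A minimal illustration of the Step7 error and its fix (my sketch, assuming TF 1.x like the code above): the loss op compares a rank-2 prediction [batch, 1] with a rank-1 label [batch], and tf.expand_dims aligns them.
import tensorflow as tf

labels = tf.constant([1., 0., 1.])            # shape (3,)   -> the "(?,)" side
logits = tf.constant([[2.0], [-1.0], [0.5]])  # shape (3, 1) -> the "(?, 1)" side
# tf.losses.sigmoid_cross_entropy(labels, logits) would raise the shape error;
# expanding the labels to (3, 1) makes the shapes compatible.
loss = tf.losses.sigmoid_cross_entropy(tf.expand_dims(labels, axis=-1), logits)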
|
4,326
|
<ASSISTANT_TASK:>
Python Code:
# Check your Python version
from sys import version_info
if version_info.major != 2 or version_info.minor != 7:
    raise Exception('Please use Python 2.7 to complete this project')
import numpy as np
import pandas as pd
# Data visualization code
from titanic_visualizations import survival_stats
from IPython.display import display
%matplotlib inline
# Load the dataset
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# Display the first few entries of the passenger data
display(full_data.head())
# Remove the 'Survived' feature from the dataset and store it in a new variable.
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis = 1)
# Display the dataset with the 'Survived' feature removed
display(data.head())
def accuracy_score(truth, pred):
    """Return the accuracy of pred with respect to truth"""
    # Make sure the number of predictions matches the number of outcomes
    if len(truth) == len(pred):
        # Compute the prediction accuracy (as a percentage)
        return "Predictions have an accuracy of {:.2f}%.".format((truth == pred).mean()*100)
    else:
        return "Number of predictions does not match number of outcomes!"
# Test the 'accuracy_score' function
predictions = pd.Series(np.ones(5, dtype = int))  # all five predictions are 1, i.e. survived
print accuracy_score(outcomes[:5], predictions)
def predictions_0(data):
    """Ignore every feature and predict that no one survived"""
    predictions = []
    for _, passenger in data.iterrows():
        # Predict the outcome for 'passenger'
        predictions.append(0)
    # Return the predictions
    return pd.Series(predictions)
# Make predictions
predictions = predictions_0(data)
print accuracy_score(outcomes, predictions)
survival_stats(data, outcomes, 'Sex')
def predictions_1(data):
    """Consider one feature: predict survival if the passenger is female"""
    predictions = []
    for _, passenger in data.iterrows():
        if (passenger['Sex'] == 'male'):
            predictions.append(0)
        else:
            predictions.append(1)
    # Return the predictions
    return pd.Series(predictions)
# Make predictions
predictions = predictions_1(data)
**Question 2**: If we predict that every female passenger on board survived and everyone else perished, what accuracy can the prediction reach?
**Answer**: *78.68%*
**Hint**: You need to add a code cell below, implement the code, and run it to compute the accuracy.
print accuracy_score(outcomes, predictions)
survival_stats(data, outcomes, 'Age', ["Sex == 'male'"])
def predictions_2(data):
    """Consider two features:
    - predict survival if the passenger is female
    - predict survival if the passenger is male and younger than 10
    """
    predictions = []
    for _, passenger in data.iterrows():
        if (passenger['Sex'] == 'female'):
            predictions.append(1)
        elif (passenger['Age'] < 10):
            predictions.append(1)
        else:
            predictions.append(0)
    # Return the predictions
    return pd.Series(predictions)
# Make predictions
predictions = predictions_2(data)
**Question 3**: When we predict that all female passengers and all male passengers younger than 10 survived, what accuracy does the prediction reach?
**Answer**: *79.35%*
**Hint**: You need to add a code cell below, implement the code, and run it to compute the accuracy.
print accuracy_score(outcomes, predictions)
survival_stats(data, outcomes, 'Age', ["Sex == 'female'","Pclass > 2"])
def predictions_3(data):
    """Consider multiple features, aiming for at least 80% accuracy"""
    predictions = []
    for _, passenger in data.iterrows():
        if (passenger['Sex'] == 'female'):
            if (passenger['Pclass'] > 2 and passenger['Age'] > 40 and passenger['Age'] < 50):
                predictions.append(0)
            else:
                predictions.append(1)
        else:
            if (passenger['Age'] < 10):
                predictions.append(1)
            elif (passenger['Pclass'] < 2):
                if (passenger['Age'] < 40 and passenger['Age'] > 30):
                    predictions.append(1)
                elif (passenger['Parch'] == 0 and passenger['Age'] < 30 and passenger['Age'] > 20):
                    predictions.append(1)
                else:
                    predictions.append(0)
            else:
                predictions.append(0)
    # Return the predictions
    return pd.Series(predictions)
# Make predictions
predictions = predictions_3(data)
print accuracy_score(outcomes, predictions)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From the sample of Titanic data we can see the features of each passenger on board
Step3: This example shows how the Survived data is removed from the Titanic DataFrame. Note that data (the passenger data) and outcomes (whether they survived) are now aligned, which means that for any passenger, data.loc[i] has the corresponding survival outcome outcome[i].
Step5: Hint: if you save the iPython Notebook, the output of the code runs is also saved. However, once you reopen the project, your workspace is reset. Make sure you rerun the code from where you last left off to regenerate the variables and functions.
Step6: Question 1: Compared with the real Titanic data, if we predict that no passenger survived, what accuracy can this prediction reach?
Step7: Predicting with a single feature
Step9: Looking at the survival statistics of the Titanic passengers, we find that most male passengers perished when the ship sank. Conversely, most female passengers survived the accident. Let's improve the earlier prediction accordingly: if the passenger is male, we predict that they perished; if the passenger is female, we predict that they survived the accident.
Step10: Predicting with two features
Step12: Looking more closely at the Titanic survival statistics, most boys younger than 10 survived the sinking, while most males older than 10 perished with the ship. Let's keep building on the previous prediction: if the passenger is female, we predict that they all survived; if the passenger is male and younger than 10, we also predict that they survived; for everyone else, we predict that they did not survive.
Step13: Your own prediction model
Step15: After viewing and studying the visualized statistics of the Titanic passengers, complete the missing parts of the code below so that the function returns your predictions.
Step16: Question 4: Describe the steps you went through to build a prediction model with 80% accuracy. Which features did you look at? Were some features more helpful than others? What conditions did you use to predict survival? What was the final accuracy of your prediction?
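Following up on Question 4, one way to hunt for useful conditions without eyeballing every chart is to tabulate conditional survival rates directly in pandas (my own exploration aid, not part of the project's starter code):
explore = data.copy()
explore['Survived'] = outcomes
# Survival rate by sex and passenger class.
print explore.groupby(['Sex', 'Pclass'])['Survived'].mean()
# Survival rate for males by age band.
males = explore[explore['Sex'] == 'male']
print males.groupby(pd.cut(males['Age'], [0, 10, 20, 30, 40, 80]))['Survived'].mean()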
|
4,327
|
<ASSISTANT_TASK:>
Python Code:
%%bash
export PROJECT=$(gcloud config list project --format "value(core.project)")
echo "Your current GCP Project Name is: "$PROJECT
%%bash
# Create a BigQuery dataset for feat_eng if it doesn't exist
datasetexists=$(bq ls -d | grep -w feat_eng)
if [ -n "$datasetexists" ]; then
echo -e "BigQuery dataset already exists, let's not recreate it."
else
echo "Creating BigQuery dataset titled: feat_eng"
bq --location=US mk --dataset \
--description 'Taxi Fare' \
$PROJECT:feat_eng
echo "\nHere are your current datasets:"
bq ls
fi
%%bigquery
CREATE OR REPLACE TABLE
feat_eng.feateng_training_data AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
passenger_count*1.0 AS passengers,
pickup_datetime,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat
FROM
`nyc-tlc.yellow.trips`
WHERE
MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 10000) = 1
AND fare_amount >= 2.5
AND passenger_count > 0
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
%%bigquery
# LIMIT 0 is a free query; this allows us to check that the table exists.
SELECT
*
FROM
feat_eng.feateng_training_data
LIMIT
0
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.baseline_model OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
pickup_datetime,
pickuplon,
pickuplat,
dropofflon,
dropofflat
FROM
feat_eng.feateng_training_data
%%bigquery
# Eval statistics on the held out data.
SELECT
*,
SQRT(loss) AS rmse
FROM
ML.TRAINING_INFO(MODEL feat_eng.baseline_model)
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.baseline_model)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.baseline_model)
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_1 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
pickup_datetime,
EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS dayofweek,
pickuplon,
pickuplat,
dropofflon,
dropofflat
FROM
feat_eng.feateng_training_data
%%bigquery
SELECT
*,
SQRT(loss) AS rmse
FROM
ML.TRAINING_INFO(MODEL feat_eng.model_1)
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_1)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_1)
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_2 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
#pickup_datetime,
EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS dayofweek,
EXTRACT(HOUR
FROM
pickup_datetime) AS hourofday,
pickuplon,
pickuplat,
dropofflon,
dropofflat
FROM
`feat_eng.feateng_training_data`
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_2)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_2)
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_3 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
#pickup_datetime,
#EXTRACT(DAYOFWEEK FROM pickup_datetime) AS dayofweek,
#EXTRACT(HOUR FROM pickup_datetime) AS hourofday,
CONCAT(CAST(EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS STRING), CAST(EXTRACT(HOUR
FROM
pickup_datetime) AS STRING)) AS hourofday,
pickuplon,
pickuplat,
dropofflon,
dropofflat
FROM
`feat_eng.feateng_training_data`
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_3)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_3)
%%bigquery
CREATE OR REPLACE MODEL feat_eng.model_4
OPTIONS
(model_type='linear_reg',
input_label_cols=['fare_amount'])
AS
SELECT
fare_amount,
passengers,
#pickup_datetime,
#EXTRACT(DAYOFWEEK FROM pickup_datetime) AS dayofweek,
#EXTRACT(HOUR FROM pickup_datetime) AS hourofday,
#CONCAT(CAST(EXTRACT(DAYOFWEEK FROM pickup_datetime) AS STRING),
#CAST(EXTRACT(HOUR FROM pickup_datetime) AS STRING)) AS hourofday,
ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK FROM pickup_datetime) AS STRING) AS dayofweek,
CAST(EXTRACT(HOUR FROM pickup_datetime) AS STRING) AS hourofday)) AS day_hr,
pickuplon,
pickuplat,
dropofflon,
dropofflat
FROM `feat_eng.feateng_training_data`
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_4)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_4)
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_5 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
#pickup_datetime,
#EXTRACT(DAYOFWEEK FROM pickup_datetime) AS dayofweek,
#EXTRACT(HOUR FROM pickup_datetime) AS hourofday,
#CONCAT(CAST(EXTRACT(DAYOFWEEK FROM pickup_datetime) AS STRING),
#CAST(EXTRACT(HOUR FROM pickup_datetime) AS STRING)) AS hourofday,
ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS STRING) AS dayofweek,
CAST(EXTRACT(HOUR
FROM
pickup_datetime) AS STRING) AS hourofday)) AS day_hr,
#pickuplon,
#pickuplat,
#dropofflon,
#dropofflat,
ST_Distance(ST_GeogPoint(pickuplon,
pickuplat),
ST_GeogPoint(dropofflon,
dropofflat)) AS euclidean
FROM
`feat_eng.feateng_training_data`
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_5)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_5)
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_6 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
#pickup_datetime,
#EXTRACT(DAYOFWEEK FROM pickup_datetime) AS dayofweek,
#EXTRACT(HOUR FROM pickup_datetime) AS hourofday,
ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS STRING) AS dayofweek,
CAST(EXTRACT(HOUR
FROM
pickup_datetime) AS STRING) AS hourofday)) AS day_hr,
#pickuplon,
#pickuplat,
#dropofflon,
#dropofflat,
ST_Distance(ST_GeogPoint(pickuplon,
pickuplat),
ST_GeogPoint(dropofflon,
dropofflat)) AS euclidean,
CONCAT(ST_AsText(ST_SnapToGrid(ST_GeogPoint(pickuplon,
pickuplat),
0.01)), ST_AsText(ST_SnapToGrid(ST_GeogPoint(dropofflon,
dropofflat),
0.01))) AS pickup_and_dropoff
FROM
`feat_eng.feateng_training_data`
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_6)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_6)
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_6 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS STRING) AS dayofweek,
CAST(EXTRACT(HOUR
FROM
pickup_datetime) AS STRING) AS hourofday)) AS day_hr,
ST_Distance(ST_GeogPoint(pickuplon,
pickuplat),
ST_GeogPoint(dropofflon,
dropofflat)) AS euclidean,
CONCAT(ST_AsText(ST_SnapToGrid(ST_GeogPoint(pickuplon,
pickuplat),
0.01)), ST_AsText(ST_SnapToGrid(ST_GeogPoint(dropofflon,
dropofflat),
0.01))) AS pickup_and_dropoff
FROM
`feat_eng.feateng_training_data`
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_7 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
ST_Distance(ST_GeogPoint(pickuplon,
pickuplat),
ST_GeogPoint(dropofflon,
dropofflat)) AS euclidean,
ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS STRING) AS dayofweek,
CAST(EXTRACT(HOUR
FROM
pickup_datetime) AS STRING) AS hourofday)) AS day_hr,
CONCAT( ML.BUCKETIZE(pickuplon,
GENERATE_ARRAY(-78, -70, 0.01)), ML.BUCKETIZE(pickuplat,
GENERATE_ARRAY(37, 45, 0.01)), ML.BUCKETIZE(dropofflon,
GENERATE_ARRAY(-78, -70, 0.01)), ML.BUCKETIZE(dropofflat,
GENERATE_ARRAY(37, 45, 0.01)) ) AS pickup_and_dropoff
FROM
`feat_eng.feateng_training_data`
%%bigquery
SELECT
*,
SQRT(loss) AS rmse
FROM
ML.TRAINING_INFO(MODEL feat_eng.model_7)
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_7)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_7)
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.final_model
TRANSFORM(fare_amount,
#SQRT( (pickuplon-dropofflon)*(pickuplon-dropofflon) + (pickuplat-dropofflat)*(pickuplat-dropofflat) ) AS euclidean,
ST_Distance(ST_GeogPoint(pickuplon,
pickuplat),
ST_GeogPoint(dropofflon,
dropofflat)) AS euclidean,
ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS STRING) AS dayofweek,
CAST(EXTRACT(HOUR
FROM
pickup_datetime) AS STRING) AS hourofday)) AS day_hr,
CONCAT( ML.BUCKETIZE(pickuplon,
GENERATE_ARRAY(-78, -70, 0.01)), ML.BUCKETIZE(pickuplat,
GENERATE_ARRAY(37, 45, 0.01)), ML.BUCKETIZE(dropofflon,
GENERATE_ARRAY(-78, -70, 0.01)), ML.BUCKETIZE(dropofflat,
GENERATE_ARRAY(37, 45, 0.01)) ) AS pickup_and_dropoff ) OPTIONS(input_label_cols=['fare_amount'],
model_type='linear_reg',
l2_reg=0.1) AS
SELECT
*
FROM
feat_eng.feateng_training_data
%%bigquery
SELECT
*,
SQRT(loss) AS rmse
FROM
ML.TRAINING_INFO(MODEL feat_eng.final_model)
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.final_model)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.final_model)
%%bigquery
SELECT
*
FROM
ML.PREDICT(MODEL feat_eng.final_model,
(
SELECT
-73.982683 AS pickuplon,
40.742104 AS pickuplat,
-73.983766 AS dropofflon,
40.755174 AS dropofflat,
3.0 AS passengers,
TIMESTAMP('2019-06-03 04:21:29.769443 UTC') AS pickup_datetime ))
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
x = ['m4', 'm5', 'm6','m7', 'final']
RMSE = [9.65,5.58,5.90,6.23,5.39]
x_pos = [i for i, _ in enumerate(x)]
plt.bar(x_pos, RMSE, color='green')
plt.xlabel("Model")
plt.ylabel("RMSE")
plt.title("RMSE Model Summary")
plt.xticks(x_pos, x)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The source dataset
Step2: Create the training data table
Step3: Verify table creation
Step4: Baseline Model
Step5: Note, the query takes several minutes to complete. After the first iteration is complete, your model (baseline_model) appears in the navigation panel of the BigQuery web UI. Because the query uses a CREATE MODEL statement to create a model, you do not see query results.
Step6: NOTE
Step7: Model 1
Step8: Once the training is done, visit the BigQuery Cloud Console and look at the model that has been trained. Then, come back to this notebook.
Step9: Here we run a SQL query that takes the SQRT() of the mean squared error as the evaluation loss metric for the baseline_model (a client-library variant of this query appears after this list).
Step10: Model 2
Step11: Model 3
Step12: Model 4
Step13: Next, two distinct SQL statements show the TRAINING and EVALUATION metrics of model_1.
Step14: Sliding down the slope toward a loss minimum (reduced taxi fare)!
Step15: Next, two distinct SQL statements show metrics for model_5.
Step16: Model 6
Step17: Next, we evaluate model_6.
Step18: Code Clean Up
Step19: BQML's Pre-processing functions
Step20: Next, we evaluate model_7.
Step21: Final Model
Step22: Next, we evaluate the final model.
Step23: Predictive Model
Step24: Lab Summary
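As referenced at Step9, the same evaluation can also be run from Python with the BigQuery client library instead of the %%bigquery magic (a sketch; it assumes google-cloud-bigquery is installed and default credentials are configured):
from google.cloud import bigquery

client = bigquery.Client()
sql = """
SELECT SQRT(mean_squared_error) AS rmse
FROM ML.EVALUATE(MODEL feat_eng.baseline_model)
"""
rmse = client.query(sql).to_dataframe()['rmse'].iloc[0]
print('Baseline RMSE: {:.2f}'.format(rmse))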
|
4,328
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats
import toyplot as tp
df = pd.read_csv('free_throws.csv', names=["away", "home", "team", "player", "score"])
df["at_home"] = df["home"] == df["team"]
df.head()
df.groupby("at_home").mean()
sdf = pd.pivot_table(df, index=["player", "team"], columns="at_home", values=["score"],
aggfunc=[len, sum], fill_value=0).reset_index()
sdf.columns = ['player', 'team', 'atm_away', 'atm_home', 'score_away', 'score_home']
sdf['atm_total'] = sdf['atm_away'] + sdf['atm_home']
sdf['score_total'] = sdf['score_away'] + sdf['score_home']
sdf.sample(10)
data = sdf.query('atm_total > 50').copy()
len(data)
data['p_home'] = data['score_home'] / data['atm_home']
data['p_away'] = data['score_away'] / data['atm_away']
data['p_ovr'] = (data['score_total']) / (data['atm_total'])
# two-sided
data['zval'] = (data['p_home'] - data['p_away']) / np.sqrt(data['p_ovr'] * (1-data['p_ovr']) * (1/data['atm_away'] + 1/data['atm_home']))
data['pval'] = 2*(1-sp.stats.norm.cdf(np.abs(data['zval'])))
# one-sided testing home advantage
# data['zval'] = (data['p_home'] - data['p_away']) / np.sqrt(data['p_ovr'] * (1-data['p_ovr']) * (1/data['atm_away'] + 1/data['atm_home']))
# data['pval'] = (1-sp.stats.norm.cdf(data['zval']))
data.sample(10)
canvas = tp.Canvas(800, 300)
ax1 = canvas.axes(grid=(1, 2, 0), label="Histogram p-values")
hist_p = ax1.bars(np.histogram(data["pval"], bins=50, normed=True), color="steelblue")
hist_p_density = ax1.plot([0, 1], [1, 1], color="red")
ax2 = canvas.axes(grid=(1, 2, 1), label="Histogram z-values")
hist_z = ax2.bars(np.histogram(data["zval"], bins=50, normed=True), color="orange")
x = np.linspace(-3, 3, 200)
hist_z_density = ax2.plot(x, sp.stats.norm.pdf(x), color="red")
T = -2 * np.sum(np.log(data["pval"]))
print 'p-value for Fisher Combination Test: {:.3e}'.format(1 - sp.stats.chi2.cdf(T, 2*len(data)))
print '"p-value" Bonferroni: {:.3e}'.format(min(1, data["pval"].min() * len(data)))
alpha = 0.05
data["reject_naive"] = 1*(data["pval"] < alpha)
print 'Number of rejections: {}'.format(data["reject_naive"].sum())
from statsmodels.sandbox.stats.multicomp import multipletests
data["reject_bc"] = 1*(data["pval"] < alpha / len(data))
print 'Number of rejections: {}'.format(data["reject_bc"].sum())
is_reject, corrected_pvals, _, _ = multipletests(data["pval"], alpha=0.1, method='fdr_bh')
data["reject_fdr"] = 1*is_reject
data["pval_fdr"] = corrected_pvals
print 'Number of rejections: {}'.format(data["reject_fdr"].sum())
len(data.query("atm_total > 2500"))
reduced_data = data.query("atm_total > 2500").copy()
is_reject2, corrected_pvals2, _, _ = multipletests(reduced_data["pval"], alpha=0.1, method='fdr_bh')
reduced_data["reject_fdr2"] = 1*is_reject2
reduced_data["pval_fdr2"] = corrected_pvals2
print 'Number of rejections: {}'.format(reduced_data["reject_fdr2"].sum())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data munging
Step2: Overall free throw%
Step3: Aggregating to player level
Step4: Individual tests
Step5: Global tests
Step6: Bonferroni's method
Step7: Conclusion
Step8: If we don't correct for multiple comparisons, there are actually 65 "significant" results (at $\alpha = 0.05$), which corresponds to about 7% of the players.
Step9: Indeed, no rejections.
Step10: Even though the BHq procedure has more power, we can't reject any of the individual hypothesis, hence we don't find sufficient evidence for any of the players that free throw performance is affected by location.
|
4,329
|
<ASSISTANT_TASK:>
Python Code:
!git clone https://github.com/markriedl/easygen.git
!cp easygen/*.js /usr/local/share/jupyter/nbextensions/google.colab/
!cp easygen/images/*.png /usr/local/share/jupyter/nbextensions/google.colab/
!apt-get update
!apt-get install chromium-chromedriver
!pip install -r easygen/requirements.txt
!git clone https://github.com/NVlabs/stylegan.git
!cp easygen/stylegan_runner.py stylegan
!git clone https://github.com/nshepperd/gpt-2
import IPython
from google.colab import output
def python_save_hook(file_text, filename):
import easygen
import hooks
status = hooks.python_save_hook_aux(file_text, filename)
ret_status = 'true' if status else 'false'
return IPython.display.JSON({'result': ret_status})
def python_load_hook(filename):
import easygen
import hooks
result = hooks.python_load_hook_aux(filename)
return IPython.display.JSON({'result': result})
def python_cwd_hook(dir):
import easygen
import hooks
result = hooks.python_cwd_hook_aux(dir)
return IPython.display.JSON({'result': result})
def python_copy_hook(path1, path2):
import easygen
import hooks
status = hooks.python_copy_hook_aux(path1, path2)
ret_status = 'true' if status else 'false'
return IPython.display.JSON({'result': ret_status})
def python_move_hook(path1, path2):
import easygen
import hooks
status = hooks.python_move_hook_aux(path1, path2)
ret_status = 'true' if status else 'false'
return IPython.display.JSON({'result': ret_status})
def python_open_text_hook(path):
import easygen
import hooks
status = hooks.python_open_text_hook_aux(path)
ret_status = 'true' if status else 'false'
return IPython.display.JSON({'result': ret_status})
def python_open_image_hook(path):
import easygen
import hooks
status = hooks.python_open_image_hook_aux(path)
ret_status = 'true' if status else 'false'
return IPython.display.JSON({'result': ret_status})
def python_mkdir_hook(path, dir_name):
import easygen
import hooks
status = hooks.python_mkdir_hook_aux(path, dir_name)
ret_status = 'true' if status else 'false'
return IPython.display.JSON({'result': ret_status})
def python_trash_hook(path):
import easygen
import hooks
status = hooks.python_trash_hook_aux(path)
ret_status = 'true' if status else 'false'
return IPython.display.JSON({'result': ret_status})
def python_run_hook(path):
import easygen
program_file_name = path
easygen.main(program_file_name)
return IPython.display.JSON({'result': 'true'})
output.register_callback('notebook.python_cwd_hook', python_cwd_hook)
output.register_callback('notebook.python_copy_hook', python_copy_hook)
output.register_callback('notebook.python_move_hook', python_move_hook)
output.register_callback('notebook.python_open_text_hook', python_open_text_hook)
output.register_callback('notebook.python_open_image_hook', python_open_image_hook)
output.register_callback('notebook.python_save_hook', python_save_hook)
output.register_callback('notebook.python_load_hook', python_load_hook)
output.register_callback('notebook.python_mkdir_hook', python_mkdir_hook)
output.register_callback('notebook.python_trash_hook', python_trash_hook)
output.register_callback('notebook.python_run_hook', python_run_hook)
import sys
sys.path.insert(0, 'easygen')
import easygen
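# Quick sanity check (sketch): the hooks registered above can be exercised
# directly from Python; the GUI reaches them from JavaScript via
# google.colab.kernel.invokeFunction('notebook.python_cwd_hook', ['/content'], {}).
print(python_cwd_hook('/content').data)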
!python gpt-2/download_model.py 117M
!python gpt-2/download_model.py 345M
!wget -O cats256x256.pkl https://www.dropbox.com/s/1w97383h0nrj4ea/karras2019stylegan-cats-256x256.pkl?dl=0
!wget -O wiki.zip https://www.dropbox.com/s/39w6mj1akwy2a0r/wiki.zip?dl=0
!unzip wiki.zip
%%html
<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1.0"/>
<style>
/* MAKE CANVAS */
canvas {
border:1px solid #d3d3d3;
background-color: #f1f1f1;
}
</style>
</head>
<body>
<script src="nbextensions/google.colab/module_dicts.js"></script>
<script src="nbextensions/google.colab/gui.js"></script>
<script>load_program("easygen/examples/make_new_colors")</script>
<div id="inp">
<h1 id="inp_module">text</h1>
<strong id="inp_param">text</strong>
<input id="inp_val" />
<button onmouseup="do_input_button_up()">ok</button>
</div>
<div id="make">
<h1>Make New Module</h1>
<select id="module_select"></select>
<button onmouseup="do_make_module_button_up()">Add Module</button>
</div>
<div>
<h1>Save Program</h1>
<input id="inp_save" />
<button onmouseup="save_program()">Save</button>
</div>
<div>
<h1>Load Program</h1>
<input id="inp_load" />
<button onmouseup="load_program()">Load</button>
</div>
<div>
<h1>Clear Program</h1>
<button onmouseup="clear_program()">Clear</button>
</div>
</body>
</html>
program_file_name = 'easygen/examples/make_new_colors'
easygen.main(program_file_name)
%%html
<html>
<body>
<script src="nbextensions/google.colab/run_program.js"></script>
<b>Run Program:</b> <input id="inp_run" /> <button onmouseup="run_program()">Run</button>
</body>
</html>
%%html
<html>
<body>
<script src="nbextensions/google.colab/file_manager.js"></script>
<h1>Manage Files</h1>
<table cols="3" border="0">
<tr><td><strong id="path1">/content</strong></td><td></td><td><strong id="path2">/content</strong></td></tr>
<tr><td><select multiple id="file_list1"></select></td><td><p><button id="copy_button" onmouseup="do_copy_mouse_up()">Copy --></button></p><p><button id="move_button" onmouseup="do_move_mouse_up()">Move --></button></p></td><td><select multiple id="file_list2"></select></td></tr>
<tr><td><button id="open_text_button" onmouseup="do_open_text_mouse_up()">Open Text</button><br><button id="open_image_button" onmouseup="do_open_image_mouse_up()">Open Image</button><br><button id="open_text_button" onmouseup="do_trash_mouse_up()">Send to trash</button></td></tr>
</table>
<h2>Make Directories</h2>
<input id="mkdir_input" /><button onmouseup="do_mkdir_mouse_up()">Make directory</button>
</body>
</html>
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install requirements
Step2: Download StyleGAN
Step3: Download GPT-2
Step4: Create backend hooks for saving and loading programs
Step5: Import EasyGen
Step6: 2. Download pre-trained neural network models
Step7: Download the GPT-2 medium 345M model. Will save to models/345M directory.
Step8: Download the StyleGAN cats model (256x256). Will save as "cats256x256.pkl" in the home directory.
Step9: 2.2 Download Wikipedia
Step10: 3. Run the GUI
Step11: 4. Run Your Program
Step12: Once you've made your own program, run the cell below, enter the program below, and the press the run button.
Step13: 5. View Your Output Files
|
4,330
|
<ASSISTANT_TASK:>
Python Code:
for researcher in researchers:
if 'Wil' in researcher.name:
print 'ID:{}, name:{}'.format(researcher.id, researcher.name)
print 'pre-filter:', query_list[-1].count()
query_list.append(query_list[-1].exclude(researcher_id=2))
print 'post-filter:', query_list[-1].count()
print 'pre-filter:', query_list[-1].count()
# For each tag, only keep it if the shortname of the species of the experiment of the image that
# this tag is attached to is "HP":
query_list.append(query_list[-1].filter(image__xp__species__shortname='HP'))
print 'post-filter:', query_list[-1].count()
print 'pre-filter:', query_list[-1].count()
# For each tag, only keep it if the name of the experiment of the image that
# this tag is attached to contains "Pleco":
query_list.append(query_list[-1].filter(image__xp__name__contains='Pleco'))
print 'post-filter:', query_list[-1].count()
print 'pre-filter:', query_list[-1].count()
query_list.append(query_list[-1].filter(timestamp__month=1, timestamp__year=2015))
print 'post-filter:', query_list[-1].count()
export_these_tags = list(query_list[-1])
export_these_tags_test = list(
query_list[0].exclude(
researcher_id=2).filter(
image__xp__species__shortname='HP').filter(
image__xp__name__contains='Pleco').filter(
timestamp__month=1, timestamp__year=2015)
)
print 'Same length:', len(export_these_tags) == len(export_these_tags_test)
print 'Same tags:', [tag.id for tag in export_these_tags] == [tag.id for tag in export_these_tags_test]
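# Django querysets are lazy: each .filter()/.exclude() only builds up SQL, and
# nothing hits the database until the queryset is evaluated, so the chained
# shortcut runs as a single query. To inspect the SQL it generates (sketch):
print query_list[-1].query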
# export_these_tags = query_list[-1]
# set this to suit yourself - leaving it blank is perfectly valid
custom_prefix = ''
# These are probably comfortable defaults
export_directory = '/mnt/server_storage/exported_data'
timestamp_export_file = True
#
# First, we build the full pathname of the exported file:
#
if timestamp_export_file:
prefix = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S-')
else:
prefix = ''
export_filename = custom_prefix + prefix + 'fishface_exported_data.csv'
export_path = os.path.join(export_directory, export_filename)
cjr_start_cache = dict()
#
# Then we write the file from `export_these_tags`:
#
with open(export_path, 'wb') as csvfile:
writer = csv.writer(csvfile, dialect=csv.excel)
# Write header row
writer.writerow([
'TagId',
'TaggerID',
'Species',
'XPName',
'ImageTimestamp',
'ImageVoltage',
'TagTimestamp',
'TagStartX',
'TagStartY',
'Angle',
'CJR_ID',
'SecondsSinceCJRFirstCapture',
])
species = export_these_tags[0].image.xp.species.shortname
for tag in export_these_tags:
cjr_id = tag.image.cjr_id
if cjr_id not in cjr_start_cache:
images_timestamps_for_cjr = [im.capture_timestamp for im in
dm.CaptureJobRecord.objects.get(pk=cjr_id).image_set.all()]
cjr_start_cache[cjr_id] = sorted(images_timestamps_for_cjr)[0]
delta = (tag.image.capture_timestamp - cjr_start_cache[cjr_id])
seconds_since_cjr_start = delta.seconds + (float(delta.microseconds) / 1000000)
x, y = tag.int_start
tag_image = tag.image
writer.writerow([
tag.id,
tag.researcher_id,
species,
tag_image.xp.name,
tag_image.capture_timestamp.strftime('%Y-%m-%d %H:%M:%S'),
tag_image.voltage,
tag.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
x,
y,
tag.degrees,
cjr_id,
seconds_since_cjr_start,
])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, let's only keep the pleco tags
Step2: Now let's keep only the tags from January of 2015
Step3: Finally, we name the latest query export_these_tags and turn it into a Python list for further processing
Step4: Shortcut filtering
Step5: Either way, you want to end up with an export_these_tags list (or query) at the end of the filtering stage.
Step6: Export Tags
|
4,331
|
<ASSISTANT_TASK:>
Python Code:
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.14'
import tensorflow as tf
print(tf.__version__)
import google.datalab.bigquery as bq
query = """
SELECT
  url, title, score
FROM
  `bigquery-public-data.hacker_news.stories`
WHERE
  LENGTH(title) > 10
  AND score > 10
LIMIT 10
"""
df = bq.Query(query).execute().result().to_dataframe()
df
query = """
SELECT
  ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.'))[OFFSET(1)] AS source,
  COUNT(title) AS num_articles
FROM
  `bigquery-public-data.hacker_news.stories`
WHERE
  REGEXP_CONTAINS(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.com$')
  AND LENGTH(title) > 10
GROUP BY
  source
ORDER BY num_articles DESC
LIMIT 10
"""
df = bq.Query(query).execute().result().to_dataframe()
df
query = """
SELECT source, LOWER(REGEXP_REPLACE(title, '[^a-zA-Z0-9 $.-]', ' ')) AS title FROM
(SELECT
  ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.'))[OFFSET(1)] AS source,
  title
FROM
  `bigquery-public-data.hacker_news.stories`
WHERE
  REGEXP_CONTAINS(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.com$')
  AND LENGTH(title) > 10
)
WHERE (source = 'github' OR source = 'nytimes' OR source = 'techcrunch')
"""
df = bq.Query(query + " LIMIT 10").execute().result().to_dataframe()
df.head()
traindf = bq.Query(query + " AND ABS(MOD(FARM_FINGERPRINT(title), 4)) > 0").execute().result().to_dataframe()
evaldf = bq.Query(query + " AND ABS(MOD(FARM_FINGERPRINT(title), 4)) = 0").execute().result().to_dataframe()
traindf['source'].value_counts()
evaldf['source'].value_counts()
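# Why FARM_FINGERPRINT? It hashes the title deterministically, so a given row
# always lands in the same split no matter how often the query is re-run.
# A rough Python analogue (illustrative only -- hashlib, not FarmHash):
import hashlib
def which_split(title):
    h = int(hashlib.md5(title.encode('utf-8')).hexdigest(), 16)
    return 'eval' if h % 4 == 0 else 'train'
print(which_split('a rough python analogue of hash-based splitting'))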
import os, shutil
DATADIR='data/txtcls'
shutil.rmtree(DATADIR, ignore_errors=True)
os.makedirs(DATADIR)
traindf.to_csv( os.path.join(DATADIR,'train.tsv'), header=False, index=False, encoding='utf-8', sep='\t')
evaldf.to_csv( os.path.join(DATADIR,'eval.tsv'), header=False, index=False, encoding='utf-8', sep='\t')
!head -3 data/txtcls/train.tsv
!wc -l data/txtcls/*.tsv
%%bash
## Make sure we have the latest version of Google Cloud Storage package
pip install --upgrade google-cloud-storage
rm -rf txtcls_trained
gcloud ml-engine local train \
--module-name=trainer.task \
--package-path=${PWD}/txtclsmodel/trainer \
-- \
--output_dir=${PWD}/txtcls_trained \
--train_data_path=${PWD}/data/txtcls/train.tsv \
--eval_data_path=${PWD}/data/txtcls/eval.tsv \
--num_epochs=0.1
%%bash
gsutil cp data/txtcls/*.tsv gs://${BUCKET}/txtcls/
%%bash
OUTDIR=gs://${BUCKET}/txtcls/trained_fromscratch
JOBNAME=txtcls_$(date -u +%y%m%d_%H%M%S)
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=${PWD}/txtclsmodel/trainer \
--job-dir=$OUTDIR \
--scale-tier=BASIC_GPU \
--runtime-version=$TFVERSION \
-- \
--output_dir=$OUTDIR \
--train_data_path=gs://${BUCKET}/txtcls/train.tsv \
--eval_data_path=gs://${BUCKET}/txtcls/eval.tsv \
--num_epochs=5
%%bash
gsutil ls gs://${BUCKET}/txtcls/trained_fromscratch/export/exporter/
%%bash
MODEL_NAME="txtcls"
MODEL_VERSION="v1_fromscratch"
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/txtcls/trained_fromscratch/export/exporter/ | tail -1)
#gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME} --quiet
#gcloud ml-engine models delete ${MODEL_NAME}
gcloud ml-engine models create ${MODEL_NAME} --regions $REGION
gcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version=$TFVERSION
techcrunch=[
'Uber shuts down self-driving trucks unit',
'Grover raises €37M Series A to offer latest tech products as a subscription',
'Tech companies can now bid on the Pentagon’s $10B cloud contract'
]
nytimes=[
'‘Lopping,’ ‘Tips’ and the ‘Z-List’: Bias Lawsuit Explores Harvard’s Admissions',
'A $3B Plan to Turn Hoover Dam into a Giant Battery',
'A MeToo Reckoning in China’s Workplace Amid Wave of Accusations'
]
github=[
'Show HN: Moon – 3kb JavaScript UI compiler',
'Show HN: Hello, a CLI tool for managing social media',
'Firefox Nightly added support for time-travel debugging'
]
import pickle
from tensorflow.python.keras.preprocessing import sequence
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import json
requests = techcrunch+nytimes+github
# Tokenize and pad sentences using same mapping used in the deployed model
tokenizer = pickle.load( open( "txtclsmodel/tokenizer.pickled", "rb" ) )
requests_tokenized = tokenizer.texts_to_sequences(requests)
requests_tokenized = sequence.pad_sequences(requests_tokenized,maxlen=50)
# JSON format the requests
request_data = {'instances':requests_tokenized.tolist()}
# Authenticate and call CMLE prediction API
credentials = GoogleCredentials.get_application_default()
api = discovery.build('ml', 'v1', credentials=credentials,
discoveryServiceUrl='https://storage.googleapis.com/cloud-ml/discovery/ml_v1_discovery.json')
parent = 'projects/%s/models/%s' % (PROJECT, 'txtcls') #version is not specified so uses default
response = api.projects().predict(body=request_data, name=parent).execute()
# Format and print response
for i in range(len(requests)):
print('\n{}'.format(requests[i]))
print(' github : {}'.format(response['predictions'][i]['dense'][0]))
print(' nytimes : {}'.format(response['predictions'][i]['dense'][1]))
print(' techcrunch: {}'.format(response['predictions'][i]['dense'][2]))
!gsutil cp gs://cloud-training-demos/courses/machine_learning/deepdive/09_sequence/text_classification/glove.6B.200d.txt gs://$BUCKET/txtcls/
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: We will look at the titles of articles and figure out whether the article came from the New York Times, TechCrunch or GitHub.
Step4: Let's do some regular expression parsing in BigQuery to get the source of the newspaper article from the URL. For example, if the url is http
Step6: Now that we have good parsing of the URL to get the source, let's put together a dataset of source and titles. This will be our labeled dataset for machine learning.
Step7: For ML training, we will need to split our dataset into training and evaluation datasets (and perhaps an independent test dataset if we are going to do model or feature selection based on the evaluation dataset).
Step8: Below we can see that roughly 75% of the data is used for training, and 25% for evaluation.
Step9: Finally we will save our data, which is currently in-memory, to disk.
Step10: TensorFlow/Keras Code
Step11: Train on the Cloud
Step12: Monitor training with TensorBoard
Step13: We will take the last export and deploy it as a REST API using Google Cloud Machine Learning Engine
Step14: Get Predictions
Step15: Our serving input function expects the already tokenized representations of the headlines, so we do that pre-processing in the code before calling the REST API.
Step16: How many of your predictions were correct?
|
4,332
|
<ASSISTANT_TASK:>
Python Code:
x, fs = librosa.load('simpleLoop.wav', sr=44100)
print x.shape
librosa.display.waveplot(x, fs)
from IPython.display import Audio
Audio(x, rate=fs)
onsets = librosa.onset.onset_detect(x, fs)
print onsets # frame numbers of estimated onsets
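# The detector returns frame indices; to express them in seconds, convert with
# frames_to_time (hop_length defaults to 512 in this version of librosa):
print librosa.frames_to_time(onsets, sr=fs)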
S = librosa.stft(x)
logS = librosa.logamplitude(S)
librosa.display.specshow(logS, fs, alpha=0.75, x_axis='time')
plt.vlines(onsets, 0, logS.shape[0], color='r')
from essentia.standard import OnsetRate
find_onsets = OnsetRate()
onset_times, onset_rate = find_onsets(x)
print onset_times
print onset_rate
from essentia.standard import AudioOnsetsMarker
onsets_marker = AudioOnsetsMarker(onsets=onset_times, type='beep')
x_beeps = onsets_marker(x)
Audio(x_beeps, rate=fs)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot the signal
Step2: Listen
Step3: librosa.onset.onset_detect
Step4: Plot the onsets on top of a spectrogram of the audio
Step5: essentia.standard.OnsetRate
Step6: essentia.standard.AudioOnsetsMarker
|
4,333
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
# YOUR CODE HERE
def hat(x,a,b):
v=-1*a*x**2+b*x**4
return v
assert hat(0.0, 1.0, 1.0)==0.0
assert hat(0.0, 1.0, 1.0)==0.0
assert hat(1.0, 10.0, 1.0)==-9.0
x=np.linspace(-3,3)
b=1.0
a=5.0
plt.plot(x,hat(x,a,b))
# YOUR CODE HERE
x0=-2
a = 5.0
b = 1.0
y=opt.minimize(hat,x0,(a,b))
y.x
assert True # leave this to grade the plot
# YOUR CODE HERE
a = 5.0
b = 1.0
x = np.linspace(-3, 3)
mini = np.array([])
for x0 in x:
    y = opt.minimize(hat, x0, (a, b))
    z = int(y.x * 100000)        # scale and truncate so nearby results compare equal
    if not np.any(mini == z):    # record each distinct minimum once
        mini = np.append(mini, z)
mini = mini / 100000
mini
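# Analytic check: dV/dx = -2*a*x + 4*b*x**3 = 0 gives minima at
# x = +/- sqrt(a/(2b)), which should match the values found above.
np.sqrt(a / (2 * b))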
plt.plot(x,hat(x,a,b),label="Hat Function")
plt.plot(mini[0],hat(mini[0],a,b),'ro',label="Minima")
plt.plot(mini[1],hat(mini[1],a,b),'ro')
plt.xlabel("X-Axis")
plt.ylabel("Y-Axis")
plt.title("Graph of Function and its Local Minima")
plt.legend()
assert True # leave this for grading the plot
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hat potential
Step2: Plot this function over the range $x\in\left[-3,3\right]$ with $b=1.0$ and $a=5.0$
Step3: Write code that finds the two local minima of this function for $b=1.0$ and $a=5.0$.
|
4,334
|
<ASSISTANT_TASK:>
Python Code:
fifteen_factorial = 15*14*13*12*11*10*9*8*7*6*5*4*3*2*1
print(fifteen_factorial)
import math
print(math.factorial(15))
print("Result correct?", math.factorial(15) == fifteen_factorial)
print(math.factorial(5), math.sqrt(2*math.pi)*5**(5+0.5)*math.exp(-5))
print(math.factorial(10), math.sqrt(2*math.pi)*10**(10+0.5)*math.exp(-10))
print(math.factorial(15), math.sqrt(2*math.pi)*15**(15+0.5)*math.exp(-15))
print(math.factorial(20), math.sqrt(2*math.pi)*20**(20+0.5)*math.exp(-20))
print("Absolute differences:")
print(math.factorial(5) - math.sqrt(2*math.pi)*5**(5+0.5)*math.exp(-5))
print(math.factorial(10) - math.sqrt(2*math.pi)*10**(10+0.5)*math.exp(-10))
print(math.factorial(15) - math.sqrt(2*math.pi)*15**(15+0.5)*math.exp(-15))
print(math.factorial(20) - math.sqrt(2*math.pi)*20**(20+0.5)*math.exp(-20))
print("Relative differences:")
print((math.factorial(5) - math.sqrt(2*math.pi)*5**(5+0.5)*math.exp(-5)) / math.factorial(5))
print((math.factorial(10) - math.sqrt(2*math.pi)*10**(10+0.5)*math.exp(-10)) / math.factorial(10))
print((math.factorial(15) - math.sqrt(2*math.pi)*15**(15+0.5)*math.exp(-15)) / math.factorial(15))
print((math.factorial(20) - math.sqrt(2*math.pi)*20**(20+0.5)*math.exp(-20)) / math.factorial(20))
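# The same comparison written as a loop; for much larger n, math.lgamma is the
# safe route, since both factorial and Stirling's form overflow a float there.
for n in (5, 10, 15, 20):
    stirling = math.sqrt(2*math.pi) * n**(n + 0.5) * math.exp(-n)
    print((math.factorial(n) - stirling) / math.factorial(n))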
def cuboid_volume(a, b, c):
"""
Compute the volume of a cuboid with edge lengths a, b, c.
Volume is abc. Only makes sense if all are non-negative.
Parameters
----------
a : float
Edge length 1
b : float
Edge length 2
c : float
Edge length 3
Returns
-------
volume : float
The volume a*b*c
"""
if (a < 0.0) or (b < 0.0) or (c < 0.0):
print("Negative edge length makes no sense!")
return 0
return a*b*c
print(cuboid_volume(1,1,1))
print(cuboid_volume(1,2,3.5))
print(cuboid_volume(0,1,1))
print(cuboid_volume(2,-1,1))
def fall_time(H):
"""
Give the time in seconds for an object to fall to the ground
from H metres.
Parameters
----------
H : float
Starting height (metres)
Returns
-------
T : float
Fall time (seconds)
"""
from math import sqrt
from scipy.constants import g
if (H < 0):
print("Negative height makes no sense!")
return 0
return sqrt(2.0*H/g)
print(fall_time(1))
print(fall_time(10))
print(fall_time(0))
print(fall_time(-1))
def triangle_area(a, b, c):
"""
Compute the area of a triangle with edge lengths a, b, c.
Area is sqrt(s (s-a) (s-b) (s-c)).
s is (a+b+c)/2.
Only makes sense if all are non-negative.
Parameters
----------
a : float
Edge length 1
b : float
Edge length 2
c : float
Edge length 3
Returns
-------
area : float
The triangle area.
"""
from math import sqrt
if (a < 0.0) or (b < 0.0) or (c < 0.0):
print("Negative edge length makes no sense!")
return 0
s = 0.5 * (a + b + c)
return sqrt(s * (s-a) * (s-b) * (s-c))
print(triangle_area(1,1,1)) # Equilateral; answer sqrt(3)/4 ~ 0.433
print(triangle_area(3,4,5)) # Right triangle; answer 6
print(triangle_area(1,1,0)) # Not a triangle; answer 0
print(triangle_area(-1,1,1)) # Not a triangle; exception or 0.
from math import sqrt
x = 1.0
y = 1.0 + 1e-14 * sqrt(3.0)
print("The calculation gives {}".format(1e14*(y-x)))
print("The result should be {}".format(sqrt(3.0)))
a = 1e-3
b = 1e3
c = a
formula1_n3_plus = (-b + sqrt(b**2 - 4.0*a*c))/(2.0*a)
formula1_n3_minus = (-b - sqrt(b**2 - 4.0*a*c))/(2.0*a)
formula2_n3_plus = (2.0*c)/(-b + sqrt(b**2 - 4.0*a*c))
formula2_n3_minus = (2.0*c)/(-b - sqrt(b**2 - 4.0*a*c))
print("For n=3, first formula, solutions are {} and {}.".format(formula1_n3_plus,
formula1_n3_minus))
print("For n=3, second formula, solutions are {} and {}.".format(formula2_n3_plus,
formula2_n3_minus))
a = 1e-4
b = 1e4
c = a
formula1_n4_plus = (-b + sqrt(b**2 - 4.0*a*c))/(2.0*a)
formula1_n4_minus = (-b - sqrt(b**2 - 4.0*a*c))/(2.0*a)
formula2_n4_plus = (2.0*c)/(-b + sqrt(b**2 - 4.0*a*c))
formula2_n4_minus = (2.0*c)/(-b - sqrt(b**2 - 4.0*a*c))
print("For n=4, first formula, solutions are {} and {}.".format(formula1_n4_plus,
formula1_n4_minus))
print("For n=4, second formula, solutions are {} and {}.".format(formula2_n4_plus,
formula2_n4_minus))
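# Independent cross-check (sketch): numpy.roots solves the quadratic via a
# companion-matrix eigenvalue method.
import numpy
print(numpy.roots([1e-4, 1e4, 1e-4]))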
def g(f, X, delta):
"""
Approximate the derivative of a given function at a point.
Parameters
----------
f : function
Function to be differentiated
X : real
Point at which the derivative is evaluated
delta : real
Step length
Returns
-------
g : real
Approximation to the derivative
"""
return (f(X+delta) - f(X)) / delta
from math import exp
for n in range(1, 8):
print("For n={}, the approx derivative is {}.".format(n, g(exp, 0.0, 10**(-2.0*n))))
def isprime(n):
"""
Checks to see if an integer is prime.
Parameters
----------
n : integer
Number to check
Returns
-------
isprime : Boolean
If n is prime
"""
# No number less than 2 can be prime
if n < 2:
return False
# We only need to check for divisors up to sqrt(n)
for m in range(2, int(n**0.5)+1):
if n%m == 0:
return False
# If we've got this far, there are no divisors.
return True
for n in range(50):
if isprime(n):
print("Function says that {} is prime.".format(n))
n = 2
while (not isprime(n)) or (isprime(2**n-1)):
n += 1
print("The first n such that 2^n-1 is not prime is {}.".format(n))
for n in range(2, 41):
if isprime(n) and isprime(2**n-1):
print("n={} is such that 2^n-1 is prime.".format(n))
def prime_factors(n):
"""
Generate all the prime factors of n.
Parameters
----------
n : integer
Number to be checked
Returns
-------
factors : dict
Prime factors (keys) and multiplicities (values)
"""
factors = {}
m = 2
while m <= n:
if n%m == 0:
factors[m] = 1
n //= m
while n%m == 0:
factors[m] += 1
n //= m
m += 1
return factors
for n in range(17, 21):
print("Prime factors of {} are {}.".format(n, prime_factors(n).keys()))
print("Multiplicities of prime factors of 48 are {}.".format(prime_factors(48).values()))
def divisors(n):
"""
Generate all integer divisors of n.
Parameters
----------
n : integer
Number to be checked
Returns
-------
divs : list
All integer divisors, including 1.
"""
divs = [1]
m = 2
while m <= n/2:
if n%m == 0:
divs.append(m)
m += 1
return divs
for n in range(16, 21):
print("The divisors of {} are {}.".format(n, divisors(n)))
def isperfect(n):
"""
Check if a number is perfect.
Parameters
----------
n : integer
Number to check
Returns
-------
isperfect : Boolean
Whether it is perfect or not.
"""
divs = divisors(n)
sum_divs = 0
for d in divs:
sum_divs += d
return n == sum_divs
for n in range(2,10000):
if (isperfect(n)):
factors = prime_factors(n)
print("{} is perfect.\n"
"Divisors are {}.\n"
"Prime factors {} (multiplicities {}).".format(
n, divisors(n), factors.keys(), factors.values()))
%timeit isperfect(2**(3-1)*(2**3-1))
%timeit isperfect(2**(5-1)*(2**5-1))
%timeit isperfect(2**(7-1)*(2**7-1))
%timeit isperfect(2**(13-1)*(2**13-1))
def logistic(x0, r, N = 1000):
sequence = [x0]
xn = x0
for n in range(N):
xnew = r*xn*(1.0-xn)
sequence.append(xnew)
xn = xnew
return sequence
import numpy
from matplotlib import pyplot
%matplotlib inline
x0 = 0.5
N = 2000
sequence1 = logistic(x0, 1.5, N)
sequence2 = logistic(x0, 3.5, N)
pyplot.plot(sequence1[-100:], 'b-', label = r'$r=1.5$')
pyplot.plot(sequence2[-100:], 'k-', label = r'$r=3.5$')
pyplot.xlabel(r'$n$')
pyplot.ylabel(r'$x$')
pyplot.show()
import numpy
from matplotlib import pyplot
%matplotlib inline
r_values = numpy.linspace(1.0, 4.0, 401)
x0 = 0.5
N = 2000
for r in r_values:
sequence = logistic(x0, r, N)
pyplot.plot(r*numpy.ones_like(sequence[1000:]), sequence[1000:], 'k.')
pyplot.xlabel(r'$r$')
pyplot.ylabel(r'$x$')
pyplot.show()
def in_Mandelbrot(c, n_iterations = 100):
z0 = 0.0 + 0j
in_set = True
n = 0
zn = z0
while in_set and (n < n_iterations):
n += 1
znew = zn**2 + c
in_set = abs(znew) < 2.0
zn = znew
return in_set
c_values = [0.0, 2+2j, 2-2j, -2+2j, -2-2j]
for c in c_values:
print("Is {} in the Mandelbrot set? {}.".format(c, in_Mandelbrot(c)))
import numpy
def grid_Mandelbrot(N):
x = numpy.linspace(-2.0, 2.0, N)
X, Y = numpy.meshgrid(x, x)
C = X + 1j*Y
grid = numpy.zeros((N, N), int)
for nx in range(N):
for ny in range(N):
grid[nx, ny] = int(in_Mandelbrot(C[nx, ny]))
return grid
from matplotlib import pyplot
%matplotlib inline
pyplot.imshow(grid_Mandelbrot(100))
from math import log
def log_Mandelbrot(c, n_iterations = 100):
z0 = 0.0 + 0j
in_set = True
n = 0
zn = z0
while in_set and (n < n_iterations):
n += 1
znew = zn**2 + c
in_set = abs(znew) < 2.0
zn = znew
return log(n)
def log_grid_Mandelbrot(N):
x = numpy.linspace(-2.0, 2.0, N)
X, Y = numpy.meshgrid(x, x)
C = X + 1j*Y
grid = numpy.zeros((N, N), int)
for nx in range(N):
for ny in range(N):
grid[nx, ny] = log_Mandelbrot(C[nx, ny])
return grid
from matplotlib import pyplot
%matplotlib inline
pyplot.imshow(log_grid_Mandelbrot(100))
pyplot.imshow(log_grid_Mandelbrot(1000)[600:800,400:600])
class Eqint(object):
def __init__(self, sequence):
self.sequence = sequence
def __repr__(self):
return str(len(self.sequence))
def __eq__(self, other):
return len(self.sequence)==len(other.sequence)
zero = Eqint([])
one_list = Eqint([1])
one_tuple = Eqint((1,))
one_string = Eqint('1')
print("Is zero equivalent to one? {}, {}, {}".format(zero == one_list,
zero == one_tuple,
zero == one_string))
print("Is one equivalent to one? {}, {}, {}.".format(one_list == one_tuple,
one_list == one_string,
one_tuple == one_string))
print(zero)
print(one_list)
print(one_tuple)
print(one_string)
class Eqint(object):
def __init__(self, sequence):
self.sequence = sequence
def __repr__(self):
return str(len(self.sequence))
def __eq__(self, other):
return len(self.sequence)==len(other.sequence)
def __add__(a, b):
return Eqint(tuple(a.sequence) + tuple(b.sequence))
zero = Eqint([])
one_list = Eqint([1])
one_tuple = Eqint((1,))
one_string = Eqint('1')
sum_eqint = zero + one_list + one_tuple + one_string
print("The sum is {}.".format(sum_eqint))
print("The internal sequence is {}.".format(sum_eqint.sequence))
positive_integers = []
zero = Eqint([])
positive_integers.append(zero)
N = 10
for n in range(1,N+1):
positive_integers.append(Eqint(list(positive_integers)))
print("The 'final' Eqint is {}".format(positive_integers[-1]))
print("Its sequence is {}".format(positive_integers[-1].sequence))
print("That is, it contains all Eqints with length less than 10.")
def normal_form(numerator, denominator):
from fractions import gcd  # on newer Pythons, gcd lives in math instead
factor = gcd(numerator, denominator)
return numerator//factor, denominator//factor
print(normal_form(3, 2))
print(normal_form(15, 3))
print(normal_form(20, 42))
class Rational(object):
"""A rational number."""
def __init__(self, numerator, denominator):
n, d = normal_form(numerator, denominator)
self.numerator = n
self.denominator = d
return None
def __repr__(self):
max_length = max(len(str(self.numerator)), len(str(self.denominator)))
if self.denominator == 1:
frac = str(self.numerator)
else:
numerator = str(self.numerator)+'\n'
bar = max_length*'-'+'\n'
denominator = str(self.denominator)
frac = numerator+bar+denominator
return frac
q1 = Rational(3, 2)
print(q1)
q2 = Rational(15, 3)
print(q2)
q3 = Rational(20, 42)
print(q3)
class Rational(object):
"""A rational number."""
def __init__(self, numerator, denominator):
n, d = normal_form(numerator, denominator)
self.numerator = n
self.denominator = d
return None
def __add__(a, b):
numerator = a.numerator * b.denominator + b.numerator * a.denominator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __repr__(self):
max_length = max(len(str(self.numerator)), len(str(self.denominator)))
if self.denominator == 1:
frac = str(self.numerator)
else:
numerator = str(self.numerator)+'\n'
bar = max_length*'-'+'\n'
denominator = str(self.denominator)
frac = numerator+bar+denominator
return frac
print(Rational(1,2) + Rational(1,3) + Rational(1,6))
class Rational(object):
"""A rational number."""
def __init__(self, numerator, denominator):
n, d = normal_form(numerator, denominator)
self.numerator = n
self.denominator = d
return None
def __add__(a, b):
numerator = a.numerator * b.denominator + b.numerator * a.denominator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __mul__(a, b):
numerator = a.numerator * b.numerator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __repr__(self):
max_length = max(len(str(self.numerator)), len(str(self.denominator)))
if self.denominator == 1:
frac = str(self.numerator)
else:
numerator = str(self.numerator)+'\n'
bar = max_length*'-'+'\n'
denominator = str(self.denominator)
frac = numerator+bar+denominator
return frac
print(Rational(1,3)*Rational(15,2)*Rational(2,5))
class Rational(object):
"""A rational number."""
def __init__(self, numerator, denominator):
n, d = normal_form(numerator, denominator)
self.numerator = n
self.denominator = d
return None
def __add__(a, b):
numerator = a.numerator * b.denominator + b.numerator * a.denominator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __mul__(a, b):
numerator = a.numerator * b.numerator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __rmul__(self, other):
numerator = self.numerator * other
return Rational(numerator, self.denominator)
def __sub__(a, b):
return a + (-1)*b
def __repr__(self):
max_length = max(len(str(self.numerator)), len(str(self.denominator)))
if self.denominator == 1:
frac = str(self.numerator)
else:
numerator = str(self.numerator)+'\n'
bar = max_length*'-'+'\n'
denominator = str(self.denominator)
frac = numerator+bar+denominator
return frac
half = Rational(1,2)
print(2*half)
print(half+(-1)*half)
print(half-half)
class Rational(object):
"""A rational number."""
def __init__(self, numerator, denominator):
n, d = normal_form(numerator, denominator)
self.numerator = n
self.denominator = d
return None
def __add__(a, b):
numerator = a.numerator * b.denominator + b.numerator * a.denominator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __mul__(a, b):
numerator = a.numerator * b.numerator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __rmul__(self, other):
numerator = self.numerator * other
return Rational(numerator, self.denominator)
def __sub__(a, b):
return a + (-1)*b
def __float__(a):
return float(a.numerator) / float(a.denominator)
def __repr__(self):
max_length = max(len(str(self.numerator)), len(str(self.denominator)))
if self.denominator == 1:
frac = str(self.numerator)
else:
numerator = str(self.numerator)+'\n'
bar = max_length*'-'+'\n'
denominator = str(self.denominator)
frac = numerator+bar+denominator
return frac
print(float(Rational(1,2)))
print(float(Rational(1,3)))
print(float(Rational(1,11)))
class Rational(object):
"""A rational number."""
def __init__(self, numerator, denominator):
n, d = normal_form(numerator, denominator)
self.numerator = n
self.denominator = d
return None
def __add__(a, b):
numerator = a.numerator * b.denominator + b.numerator * a.denominator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __mul__(a, b):
numerator = a.numerator * b.numerator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __rmul__(self, other):
numerator = self.numerator * other
return Rational(numerator, self.denominator)
def __sub__(a, b):
return a + (-1)*b
def __float__(a):
return float(a.numerator) / float(a.denominator)
def __lt__(a, b):
return a.numerator * b.denominator < a.denominator * b.numerator
def __repr__(self):
max_length = max(len(str(self.numerator)), len(str(self.denominator)))
if self.denominator == 1:
frac = str(self.numerator)
else:
numerator = '\n'+str(self.numerator)+'\n'
bar = max_length*'-'+'\n'
denominator = str(self.denominator)
frac = numerator+bar+denominator
return frac
q_list = [Rational(n//2, n) for n in range(2, 12)]
print(sorted(q_list))
def wallis_rational(N):
"""
The partial product approximation to pi using the first N terms of Wallis' formula.
Parameters
----------
N : int
Number of terms in product
Returns
-------
partial : Rational
A rational number approximation to pi
"""
partial = Rational(2,1)
for n in range(1, N+1):
partial = partial * Rational((2*n)**2, (2*n-1)*(2*n+1))
return partial
pi_list = [wallis_rational(n) for n in range(1, 21)]
print(pi_list)
print(sorted(pi_list))
import numpy
print(numpy.pi-numpy.array(list(map(float, pi_list))))
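# Convergence-rate check (sketch): the error of Wallis' product decays roughly
# like O(1/N), so N * error should level off rather than keep shrinking.
errors = numpy.pi - numpy.array(list(map(float, pi_list)))
print(errors * numpy.arange(1, len(errors) + 1))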
lhs = 27**5 + 84**5 + 110**5 + 133**5
rhs = 144**5
print("Does the LHS {} equal the RHS {}? {}".format(lhs, rhs, lhs==rhs))
import numpy
import itertools
input_list = numpy.arange(1, 7)
combinations = list(itertools.combinations(input_list, 4))
print(combinations)
n_combinations = 144*143*142*141//24  # C(144, 4), kept as an exact integer
print("Number of combinations of 4 objects from 144 is {}".format(n_combinations))
from matplotlib import pyplot
%matplotlib inline
n = numpy.arange(5, 51)
N = numpy.zeros_like(n)
for i, n_c in enumerate(n):
combinations = list(itertools.combinations(numpy.arange(1,n_c+1), 4))
N[i] = len(combinations)
pyplot.figure(figsize=(12,6))
pyplot.loglog(n, N, linestyle='None', marker='x', color='k', label='Combinations')
pyplot.loglog(n, n**4, color='b', label=r'$n^4$')
pyplot.xlabel(r'$n$')
pyplot.ylabel(r'$N$')
pyplot.legend(loc='upper left')
pyplot.show()
nmax=145
range_to_power = numpy.arange(1, nmax)**5
lhs_combinations = list(itertools.combinations(range_to_power, 4))
lhs_sums = []
for lhs_terms in lhs_combinations:
lhs_sums.append(numpy.sum(numpy.array(lhs_terms)))
for i, lhs in enumerate(lhs_sums):
if lhs in range_to_power:
rhs_primitive = int(lhs**(0.2))
lhs_primitive = (numpy.array(lhs_combinations[i])**(0.2)).astype(int)
print("The LHS terms are {}.".format(lhs_primitive))
print("The RHS term is {}.".format(rhs_primitive))
def dvdt(v, t, sigma, rho, beta):
"""
Define the Lorenz system.
Parameters
----------
v : list
State vector
t : float
Time
sigma : float
Parameter
rho : float
Parameter
beta : float
Parameter
Returns
-------
dvdt : list
RHS defining the Lorenz system
"""
x, y, z = v
return [sigma*(y-x), x*(rho-z)-y, x*y-beta*z]
import numpy
from scipy.integrate import odeint
v0 = [1.0, 1.0, 1.0]
sigma = 10.0
beta = 8.0/3.0
t_values = numpy.linspace(0.0, 100.0, 5000)
rho_values = [13.0, 14.0, 15.0, 28.0]
v_values = []
for rho in rho_values:
params = (sigma, rho, beta)
v = odeint(dvdt, v0, t_values, args=params)
v_values.append(v)
%matplotlib inline
from matplotlib import pyplot
from mpl_toolkits.mplot3d.axes3d import Axes3D
fig = pyplot.figure(figsize=(12,6))
for i, v in enumerate(v_values):
ax = fig.add_subplot(2,2,i+1,projection='3d')
ax.plot(v[:,0], v[:,1], v[:,2])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_zlabel(r'$z$')
ax.set_title(r"$\rho={}$".format(rho_values[i]))
pyplot.show()
t_values = numpy.linspace(0.0, 40.0, 4000)
rho = 28.0
params = (sigma, rho, beta)
v_values = []
v0_values = [[1.0,1.0,1.0],
[1.0+1e-5,1.0+1e-5,1.0+1e-5]]
for v0 in v0_values:
v = odeint(dvdt, v0, t_values, args=params)
v_values.append(v)
fig = pyplot.figure(figsize=(12,6))
line_colours = 'by'
for tstart in range(4):
ax = fig.add_subplot(2,2,tstart+1,projection='3d')
for i, v in enumerate(v_values):
ax.plot(v[tstart*1000:(tstart+1)*1000,0],
v[tstart*1000:(tstart+1)*1000,1],
v[tstart*1000:(tstart+1)*1000,2],
color=line_colours[i])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_zlabel(r'$z$')
ax.set_title(r"$t \in [{},{}]$".format(tstart*10, (tstart+1)*10))
pyplot.show()
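# Quantifying the sensitivity (sketch): the separation between the two
# trajectories grows roughly exponentially before saturating at the attractor size.
separation = numpy.linalg.norm(v_values[0] - v_values[1], axis=1)
pyplot.semilogy(t_values, separation)
pyplot.xlabel(r'$t$')
pyplot.ylabel('separation')
pyplot.show()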
import sympy
sympy.init_printing()
y = sympy.Function('y')
t = sympy.symbols('t')
sympy.dsolve(sympy.diff(y(t), t) + y(t)**2 - sympy.exp(-t), y(t))
for n in range(2, 11):
ode_solution = sympy.dsolve(sympy.diff(y(t), t) + y(t)**n - sympy.exp(-t), y(t),
ics = {y(0) : 1})
print(ode_solution)
%matplotlib inline
for n in range(2, 11):
ode_solution = sympy.dsolve(sympy.diff(y(t), t) + y(t)**n - sympy.exp(-t), y(t),
ics = {y(0) : 1})
sympy.plot(ode_solution.rhs.removeO(), (t, 0, 1));
def all_primes(N):
"""
Return all primes less than or equal to N.
Parameters
----------
N : int
Maximum number
Returns
-------
prime : generator
Prime numbers
"""
primes = []
for n in range(2, N+1):
is_n_prime = True
for p in primes:
if n%p == 0:
is_n_prime = False
break
if is_n_prime:
primes.append(n)
yield n
from itertools import tee
def pair_primes(N):
"Generate consecutive prime pairs, using the itertools recipe"
a, b = tee(all_primes(N))
next(b, None)
return zip(a, b)
def check_twin(pair):
"""Take in a pair of integers, check if they differ by 2."""
p1, p2 = pair
return p2-p1 == 2
def twin_primes(N):
"""Return all twin primes"""
return filter(check_twin, pair_primes(N))
for tp in twin_primes(20):
print(tp)
def pi_N(N):
"""Use the quantify pattern from itertools to count the number of twin primes."""
return sum(map(check_twin, pair_primes(N)))
pi_N(1000)
import numpy
from matplotlib import pyplot
%matplotlib inline
N = numpy.array([2**k for k in range(4, 17)])
twin_prime_fraction = numpy.array(list(map(pi_N, N))) / N
pyplot.semilogx(N, twin_prime_fraction)
pyplot.xlabel(r"$N$")
pyplot.ylabel(r"$\pi_N / N$")
pyplot.show()
pyplot.semilogx(N, twin_prime_fraction * numpy.log(N)**2)
pyplot.xlabel(r"$N$")
pyplot.ylabel(r"$\pi_N \times \log(N)^2 / N$")
pyplot.show()
class Polynomial(object):
"""Representing a polynomial."""
explanation = "I am a polynomial"
def __init__(self, roots, leading_term):
self.roots = roots
self.leading_term = leading_term
self.order = len(roots)
def __repr__(self):
string = str(self.leading_term)
for root in self.roots:
if root == 0:
string = string + "x"
elif root > 0:
string = string + "(x - {})".format(root)
else:
string = string + "(x + {})".format(-root)
return string
def __mul__(self, other):
roots = self.roots + other.roots
leading_term = self.leading_term * other.leading_term
return Polynomial(roots, leading_term)
def explain_to(self, caller):
print("Hello, {}. {}.".format(caller,self.explanation))
print("My roots are {}.".format(self.roots))
return None
class Monomial(Polynomial):
"""Representing a monomial, which is a polynomial with leading term 1."""
explanation = "I am a monomial"
def __init__(self, roots):
Polynomial.__init__(self, roots, 1)
def __repr__(self):
string = ""
for root in self.roots:
if root == 0:
string = string + "x"
elif root > 0:
string = string + "(x - {})".format(root)
else:
string = string + "(x + {})".format(-root)
return string
def basis_pN(N):
"""A generator for the simplest basis of P^N."""
for n in range(N+1):
yield Monomial(n*[0])
for poly in basis_pN(3):
print(poly)
class Monomial(Polynomial):
"""Representing a monomial, which is a polynomial with leading term 1."""
explanation = "I am a monomial"
def __init__(self, roots):
Polynomial.__init__(self, roots, 1)
def __repr__(self):
if len(self.roots):
string = ""
n_zero_roots = len(self.roots) - numpy.count_nonzero(self.roots)
if n_zero_roots == 1:
string = "x"
elif n_zero_roots > 1:
string = "x^{}".format(n_zero_roots)
else: # Monomial degree 0.
string = "1"
for root in self.roots:
if root > 0:
string = string + "(x - {})".format(root)
elif root < 0:
string = string + "(x + {})".format(-root)
return string
for poly in basis_pN(3):
print(poly)
def basis_pN_variant(N):
"""A generator for the 'sum' basis of P^N."""
for n in range(N+1):
yield Monomial(range(n+1))
for poly in basis_pN_variant(4):
print(poly)
from itertools import product
def basis_product():
"""Basis of the product space"""
yield from product(basis_pN(3), basis_pN_variant(4))
for p1, p2 in basis_product():
print("Basis element is ({}) X ({}).".format(p1, p2))
def basis_product_long_form():
    """Basis of the product space (without using yield from)"""
    prod = product(basis_pN(3), basis_pN_variant(4))
    for p in prod:
        yield p
for p1, p2 in basis_product_long_form():
    print("Basis element is ({}) X ({}).".format(p1, p2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 2
Step2: Exercise 3
Step4: We see that the relative error decreases, whilst the absolute error grows (significantly).
Step6: In later cases, after having covered exceptions, I would suggest raising a NotImplementedError for negative edge lengths.
Step8: Exercise 3
Step9: Floating point numbers
Step10: We see that the first three digits are correct. This isn't too surprising
Step12: There is a difference in the fifth significant figure in both solutions in the first case, which gets to the third (arguably the second) significant figure in the second case. Comparing to the limiting solutions above, we see that the larger root is definitely more accurately captured with the first formula than the second (as the result should be bigger than $10^{-2n}$).
Step13: Exercise 6
Step15: We have a combination of floating point inaccuracies
Step16: Exercise 2
Step17: Exercise 3
Step19: Exercise 4
Step21: Exercise 5
Step23: Exercise 6
Step24: Exercise 7
Step25: It's worth thinking about the operation counts of the various functions implemented here. The implementations are inefficient, but even in the best case you see how the number of operations (and hence computing time required) rapidly increases.
Step26: Exercise 2
Step27: This suggests that, for $r=1.5$, the sequence has settled down to a fixed point. In the $r=3.5$ case it seems to be moving between four points repeatedly.
Step28: Exercise 4
Step29: Exercise 2
Step30: Exercise 3
Step31: Exercise 4
Step32: Exercise 5
Step33: Exercise 6
Step34: Equivalence classes
Step35: Exercise 2
Step36: Exercise 3
Step37: Exercise 4
Step38: Exercise 5
Step39: Rational numbers
Step41: Exercise 2
Step43: Exercise 3
Step45: Exercise 4
Step47: Exercise 5
Step49: Exercise 6
Step51: Exercise 7
Step53: Exercise 8
Step54: The shortest published Mathematical paper
Step55: Exercise 2
Step56: The combinations function returns all the combinations (ignoring order) of r elements from a given list. For example, take a list of length 6, [1, 2, 3, 4, 5, 6] and compute all the combinations of length 4
Step57: We can already see that the number of terms to consider is large.
Step58: Exercise 2a
Step59: With 17 million combinations to work with, we'll need to be a little careful how we compute.
Step60: Then calculate the sums
Step61: Finally, loop through the sums and check to see if it matches any possible term on the RHS
Step63: Lorenz attractor
Step64: Exercise 2
Step65: Exercise 3
Step66: This shows the sensitive dependence on initial conditions that is characteristic of chaotic behaviour.
Step67: Exercise 2
Step68: Exercise 3
Step70: Twin primes
Step71: Now we can generate pairs using the pairwise recipe
Step73: We could examine the results of the two primes directly. But an efficient solution is to use python's filter function. To do this, first define a function checking if the pair are twin primes
Step75: Then use the filter function to define another generator
Step76: Now check by finding the twin primes with $N<20$
Step78: Exercise 2
Step79: Exercise 3
Step80: For those that have checked Wikipedia, you'll see Brun's theorem which suggests a specific scaling, that $\pi_N$ is bounded by $C N / \log(N)^2$. Checking this numerically on this data
Step83: A basis for the polynomials
Step85: Now we can define the first basis
Step86: Then test it on $\mathbb{P}^N$
Step88: This looks horrible, but is correct. To really make this look good, we need to improve the output. If we use
Step89: then we can deal with the uglier cases, and re-running the test we get
Step91: An even better solution would be to use the numpy.unique function as in this stackoverflow answer (the second one!) to get the frequency of all the roots.
Step93: I am too lazy to work back through the definitions and flip all the signs; it should be clear how to do this!
Step95: I've cheated here as I haven't introduced the yield from syntax (which returns an iterator from a generator). We could write this out instead as
|
4,335
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
from scipy.optimize import fsolve
from scipy import integrate
import matplotlib.pyplot as plt
from clawpack import pyclaw
from clawpack import riemann
from clawpack.visclaw.ianimate import ianimate
import matplotlib
plt.style.use('seaborn-talk')
from IPython.display import HTML
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/V_obYAebT8g" frameborder="0" allowfullscreen></iframe>')
def steady_rhs(h,r,alpha,g=1.):
return h/(g/alpha**2 * r**3 * h**3 - r)
r = np.linspace(0.5, 10)
h0 = 1.; u0 = 1.; alpha = r[0]*h0*u0
h = np.squeeze(integrate.odeint(steady_rhs,h0,r,args=(alpha,0.))) # Zero gravity
plt.plot(r,h); plt.title('$F=\infty$'); plt.xlabel('r'); plt.ylabel('h');
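# Analytic check: with g = 0 the ODE reduces to dh/dr = -h/r, whose solution is
# h(r) = h0*r0/r, so the numerical profile should match it to solver tolerance.
print(np.max(np.abs(h - h0*r[0]/r)))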
h0 = 1.; u0 = 2.; alpha = r[0]*h0*u0; g=1.
h = np.squeeze(integrate.odeint(steady_rhs,h0,r,args=(alpha,g)));
u = alpha/(h*r)
plt.figure(figsize=(12,4));
plt.subplot(1,3,1); plt.title('Depth');
plt.xlabel('r'); plt.ylabel('h');
plt.plot(r,h);
plt.subplot(1,3,2); plt.title('Velocity');
plt.xlabel('r'); plt.ylabel('u');
plt.plot(r,alpha/(r*h));
plt.subplot(1,3,3); plt.title('Froude number');
plt.xlabel('r'); plt.ylabel('$F$');
plt.plot(r,alpha/(r*h*np.sqrt(g*h)));
plt.tight_layout();
h0 = 1.; u0 = 0.5; alpha = r[0]*h0*u0; g=1.
h = np.squeeze(integrate.odeint(steady_rhs,h0,r,args=(alpha,g)));
u = alpha/(h*r)
plt.figure(figsize=(12,4));
plt.subplot(1,3,1); plt.title('Depth');
plt.xlabel('r'); plt.ylabel('h');
plt.plot(r,h);
plt.subplot(1,3,2); plt.title('Velocity');
plt.xlabel('r'); plt.ylabel('u');
plt.plot(r,alpha/(r*h));
plt.subplot(1,3,3); plt.title('Froude number');
plt.xlabel('r'); plt.ylabel('$F$');
plt.plot(r,alpha/(r*h*np.sqrt(g*h)));
plt.tight_layout();
def initial_and_boundary_data(r_jump = 1.,r_inner = 0.5,r_outer = 5.,
num_cells = 501,g=1.,h_in=1.,u_in=2.):
r = pyclaw.Dimension(r_inner, r_outer, num_cells, name='r')
rc = r.centers
i_jump = np.argmin(np.abs(rc-r_jump))
# Left boundary
h_inner = h_in
u_inner = u_in
beta_inner = r_inner*h_inner*u_inner
h = 0*rc
u = 0*rc
d = r.delta
rvals = np.insert(rc[:i_jump+1],(0),[rc[0]-2*d,rc[0]-d])
beta = rvals[0]*h_inner*u_inner
hh = integrate.odeint(steady_rhs,h_inner,rvals,args=(beta,g))
hh = np.squeeze(hh)
uu = beta/(hh*rvals)
h[:i_jump+1] = np.squeeze(hh[2:])
u[:i_jump+1] = uu[2:]
lower_bc_data = [hh[:2], uu[:2]]
# Jump in h
# Left side of jump
h_m = h[i_jump]; u_m = u[i_jump]
aleph = (-3*h_m+np.sqrt(h_m**2+8*h_m*u_m**2/g))/2.
# Right side of jump
h_p = h_m + aleph; u_p = h_m*u_m/h_p
h[i_jump+1] = h_p; u[i_jump+1] = u_p
# Outer part of solution
beta_outer = rc[i_jump+1]*h[i_jump+1]*u[i_jump+1]
rvals = np.append(rc[i_jump+1:],[rc[-1]+d,rc[-1]+2*d])
hh = integrate.odeint(steady_rhs,h_p,rvals,args=(beta_outer,g))
hh = np.squeeze(hh)
uu = beta_outer/(rvals*hh)
h[i_jump+1:] = hh[:-2]
u[i_jump+1:] = uu[:-2]
upper_bc_data = [hh[-2:],uu[-2:]]
return h, u, upper_bc_data, lower_bc_data, rc
h, u, _, _, rc = initial_and_boundary_data()
plt.plot(rc, h)
plt.xlim(0.5,5)
def step_radial_src(solver,state,dt):
"""
Geometric source terms for SW equations with cylindrical symmetry.
Integrated using a 2-stage, 2nd-order Runge-Kutta method.
This is a Clawpack-style source term routine, which approximates
the integral of the source terms over a step.
"""
dt2 = dt/2.
q = state.q
rad = state.grid.r.centers
h = q[0,:]
u = q[1,:]/h
qstar = np.empty(q.shape)
qstar[0,:] = q[0,:] - dt2/rad * h*u
qstar[1,:] = q[1,:] - dt2/rad * h*u*u
h = qstar[0,:]
u = qstar[1,:]/h
q[0,:] = q[0,:] - dt/rad * h*u
q[1,:] = q[1,:] - dt/rad * h*u*u
def inner_state(state,dim,t,qbc,auxbc,num_ghost):
h = state.problem_data['lower_bc_data'][0]
u = state.problem_data['lower_bc_data'][1]
qbc[0,:num_ghost] = h
qbc[1,:num_ghost] = h*u
def outer_state(state,dim,t,qbc,auxbc,num_ghost):
h = state.problem_data['upper_bc_data'][0]
u = state.problem_data['upper_bc_data'][1]
qbc[0,-num_ghost:] = h
qbc[1,-num_ghost:] = h*u
def setup(r_jump=1.,r_inner=0.5,r_outer=3.,num_cells=501,g=1.):
r = pyclaw.Dimension(r_inner, r_outer, num_cells=num_cells, name='r')
h, u, upper_bc_data, lower_bc_data, _ = \
initial_and_boundary_data(r_jump=r_jump,g=g, r_inner=r_inner,
r_outer=r_outer, num_cells=num_cells)
solver = pyclaw.ClawSolver1D(riemann_solver=riemann.shallow_roe_with_efix_1D)
solver.bc_lower[0] = pyclaw.BC.custom
solver.user_bc_lower = inner_state
solver.bc_upper[0] = pyclaw.BC.custom
solver.user_bc_upper = outer_state
solver.step_source = step_radial_src
domain = pyclaw.Domain([r])
state = pyclaw.State(domain,solver.num_eqn)
state.problem_data['grav'] = g
state.problem_data['lower_bc_data'] = lower_bc_data
state.problem_data['upper_bc_data'] = upper_bc_data
state.q[0,:] = h
state.q[1,:] = h*u
claw = pyclaw.Controller()
claw.solver = solver
claw.solution = pyclaw.Solution(state,domain)
claw.tfinal = 15.0
claw.keep_copy = True
claw.num_output_times = 50
return claw
claw = setup()
claw.verbosity=0
claw.run()
anim = ianimate(claw)
plt.close('all')
HTML(anim.to_jshtml())
def setup_constant_initial_data(r_jump=1.,r_inner=0.5,r_outer=3.,
num_cells=501,g=1.):
r = pyclaw.Dimension(r_inner, r_outer, num_cells=num_cells, name='r')
solver = pyclaw.ClawSolver1D(riemann_solver=riemann.shallow_roe_with_efix_1D)
solver.bc_lower[0] = pyclaw.BC.custom
solver.user_bc_lower = inner_state
solver.bc_upper[0] = pyclaw.BC.custom
solver.user_bc_upper = outer_state
solver.step_source = step_radial_src
domain = pyclaw.Domain([r])
state = pyclaw.State(domain,solver.num_eqn)
state.problem_data['grav'] = g
hl = 0.5; hul = 3.
hr = 2.; hur = 0.1
state.problem_data['lower_bc_data'] = np.array([[hl,hl],[hul,hul]])
state.problem_data['upper_bc_data'] = np.array([[hr,hr],[hur,hur]])
state.q[0,:] = 1.
state.q[1,:] = 0.
claw = pyclaw.Controller()
claw.solver = solver
claw.solution = pyclaw.Solution(state,domain)
claw.tfinal = 15.0
claw.keep_copy = True
claw.num_output_times = 50
return claw
claw = setup_constant_initial_data()
claw.verbosity=0
claw.run()
anim = ianimate(claw)
HTML(anim.to_jshtml())
def compute_inner_values(Q,a,r0):
Q: flow rate
a: jet radius
r0: inner domain radius
assert r0 >= a
h0 = a**2/(2*r0)
u0 = Q/(2*np.pi*r0*h0)
return h0, u0
def jump_location(Q,r_jet,h_inf,r_inf=100.,g=1.,r0=None,tol=1./10000):
r"""Predict location of hydraulic jump for given inner
radius flow and asymptotic depth."""
if r0 == None:
r0 = r_jet
h0, u0 = compute_inner_values(Q,r_jet,r0)
F_in = u0/np.sqrt(g*h0) # Inflow Froude number
assert F_in > 1 # Initial flow must be supercritical
r = np.linspace(r0,r_inf,int(round(1./tol)))
beta = r0 * h0 * u0
u_inf = u0 * (r0/r_inf) * (h0/h_inf)
F_inf = u_inf/np.sqrt(g*h_inf) # Far-field Froude number
assert F_inf < 1 # Far field flow must be subcritical
# Integrate outward
hh_in = np.squeeze(integrate.odeint(steady_rhs,h0,r,args=(beta,g)))
uu_in = beta/(r*hh_in)
hh_out = np.squeeze(integrate.odeint(steady_rhs,h_inf,-r[::-1],args=(beta,g)))
hh_out = hh_out[::-1]
F_l = uu_in/np.sqrt(g*hh_in) # Froude number for left state
phi = hh_in - hh_out + 1.5*hh_in*(np.sqrt(1.+8./9.*(F_l**2-1.))-1)
jump_loc = np.argmin(np.abs(phi))
profile = 0*r
profile[:jump_loc] = hh_in[:jump_loc]
profile[jump_loc:] = hh_out[jump_loc:]
return r[jump_loc], r, profile
r_jump, r, profile = jump_location(Q=200.,r_jet=1.,h_inf=1.,g=980.,tol=1.e-6)
print('Jump radius: '+str(r_jump)+' cm')
plt.clf()
plt.plot(r,profile)
plt.xlim(r[0],10);
Q = 202. # Flow rate (in cm^3/s)
r_jet = 0.3175 # Nozzle radius (in cm)
h_inf = 0.343 # Depth at infinity (in cm)
g = 980. # Gravity (in cm/s^2)
r_jump, r, profile = jump_location(Q,r_jet,h_inf,r_inf=500.,g=980.,tol=1.e-6)
print('Predicted jump radius: '+str(r_jump)+' cm')
print('Measured jump radius: 17 cm')
plt.plot(r,profile)
plt.xlim(r[0],r[-1]);
Q = 4.48
r_jet = 0.1
h_inf = 0.18
r_jump, r, profile = jump_location(Q,r_jet,h_inf,r_inf=50.,g=980.,tol=1.e-6)
print('Predicted jump radius: '+str(r_jump)+' cm')
print('Measured jump radius: 1.2 cm')
plt.plot(r,profile)
plt.xlim(r[0],r[-1]);
Q = 26.
r_jet = 0.215
h_inf = 0.33
r_jump, r, profile = jump_location(Q,r_jet,h_inf,r_inf=200.,g=980.,tol=1.e-6)
print('Predicted jump radius: '+str(r_jump)+' cm')
print('Measured jump radius: 2.3 cm')
plt.plot(r,profile)
plt.xlim(r[0],r[-1]);
claw = setup()
claw.solver.bc_upper[0] = pyclaw.BC.extrap
claw.verbosity = 0
claw.run()
anim = ianimate(claw);
plt.close('all')
HTML(anim.to_jshtml())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Our next example is something you can experiment with in your own home. Go to the kitchen sink, position the faucet over a flat part of the sink (not over the drain), and turn it on. Carefully examine the flow of water in the sink. You will see that near the jet of water coming from the faucet, there is a region of very shallow, very fast-flowing water. Then at a certain distance, the water suddenly becomes deeper and slower moving. This jump is relatively stable and will approximately form a circle if the bottom of the sink is flat around the area under the faucet.
Step2: This jump (known as a hydraulic jump) is a shock wave and the entire flow can be modeled as a sort of multidimensional Riemann problem. Instead of left and right states, we have inner and outer states. To investigate this phenomenon we'll again use the shallow water equations. We'll assume the flow has cylindrical symmetry -- in other words, it depends on the distance away from the center (where water falls from the faucet), but not on the angular coordinate.
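As a compact sketch of the governing system (my own summary, using $h$ for depth, $u$ for radial velocity, and $g$ for gravity; not quoted from the notebook), the shallow water equations with cylindrical symmetry pick up geometric source terms:
$$h_t + (hu)_r = -\frac{hu}{r}, \qquad (hu)_t + \Big(hu^2 + \tfrac{1}{2}gh^2\Big)_r = -\frac{hu^2}{r}.$$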
Step3: Supercritical flow ($F>1$)
Step4: Subcritical flow ($F<1$)
Step5: A different and complementary approach to deriving steady profiles (see <cite data-cite="Ivings1998"><a href="riemann.html#zobeyer2013radial">(Zobeyer 2013)</a></cite>) is to recognize that in such a solution, the energy $gh^2 + \frac{hu^2}{2}$ is constant. More simply
Step6: Here's an example of a solution
Step8: Finite volume simulation
Step9: Dirichlet BCs with transition from super- to subsonic flow generically create a hydraulic jump
Step11: Comparison with experimental results
Step13: Locating the jump
Step14: Watson's experiment
Step15: Clearly, some of the effects we have ignored must be important! In particular, as Watson (and others) argue, viscosity or friction becomes very significant in the shallow flow before the jump, causing the jump to emerge much closer to the jet than this inviscid model predicts.
Step16: The difference is less extreme, but still very substantial. It is worth noting also that if we consider water flowing onto an infinite flat plate, the purely hyperbolic model (with no viscosity or friction) doesn't predict any jump at all, because there is no mechanism forcing the flow to transition to a subsonic state. We can observe this in the simulation if we set the boundary condition at the outer radius to outflow.
|
4,336
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
#export
from exp.nb_10 import *
path = datasets.untar_data(datasets.URLs.IMAGENETTE_160)
tfms = [make_rgb, ResizeFixed(128), to_byte_tensor, to_float_tensor]
bs = 64
il = ImageList.from_files(path, tfms=tfms)
sd = SplitData.split_by_func(il, partial(grandparent_splitter, valid_name='val'))
ll = label_by_func(sd, parent_labeler, proc_y=CategoryProcessor())
data = ll.to_databunch(bs, c_in=3, c_out=10, num_workers=4)
img1 = PIL.Image.open(ll.train.x.items[0])
img1
img2 = PIL.Image.open(ll.train.x.items[4000])
img2
mixed_up = ll.train.x[0] * 0.3 + ll.train.x[4000] * 0.7
plt.imshow(mixed_up.permute(1,2,0));
# PyTorch has a log-gamma but not a gamma, so we'll create one
Γ = lambda x: x.lgamma().exp()
facts = [math.factorial(i) for i in range(7)]
plt.plot(range(7), facts, 'ro')
plt.plot(torch.linspace(0,6), Γ(torch.linspace(0,6)+1))
plt.legend(['factorial','Γ']);
torch.linspace(0,0.9,10)
_,axs = plt.subplots(1,2, figsize=(12,4))
x = torch.linspace(0,1, 100)
for α,ax in zip([0.1,0.8], axs):
α = tensor(α)
# y = (x.pow(α-1) * (1-x).pow(α-1)) / (gamma_func(α ** 2) / gamma_func(α))
y = (x**(α-1) * (1-x)**(α-1)) / (Γ(α)**2 / Γ(2*α))
ax.plot(x,y)
ax.set_title(f"α={α:.1}")
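# Quick numerical check (my addition): with a small α, Beta(α, α) samples pile
# up near 0 and 1, so most mixed images stay close to one of the two originals.
from torch.distributions.beta import Beta
for α in (0.1, 0.8):
    λ = Beta(tensor(α), tensor(α)).sample((10000,))
    extreme = ((λ < 0.1) | (λ > 0.9)).float().mean().item()
    print(f"α={α}: fraction of λ outside [0.1, 0.9] = {extreme:.2f}")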
#export
class NoneReduce():
def __init__(self, loss_func):
self.loss_func,self.old_red = loss_func,None
def __enter__(self):
if hasattr(self.loss_func, 'reduction'):
self.old_red = getattr(self.loss_func, 'reduction')
setattr(self.loss_func, 'reduction', 'none')
return self.loss_func
else: return partial(self.loss_func, reduction='none')
def __exit__(self, type, value, traceback):
if self.old_red is not None: setattr(self.loss_func, 'reduction', self.old_red)
#export
from torch.distributions.beta import Beta
def unsqueeze(input, dims):
for dim in listify(dims): input = torch.unsqueeze(input, dim)
return input
def reduce_loss(loss, reduction='mean'):
return loss.mean() if reduction=='mean' else loss.sum() if reduction=='sum' else loss
#export
class MixUp(Callback):
_order = 90 #Runs after normalization and cuda
def __init__(self, α:float=0.4): self.distrib = Beta(tensor([α]), tensor([α]))
def begin_fit(self): self.old_loss_func,self.run.loss_func = self.run.loss_func,self.loss_func
def begin_batch(self):
if not self.in_train: return #Only mixup things during training
λ = self.distrib.sample((self.yb.size(0),)).squeeze().to(self.xb.device)
λ = torch.stack([λ, 1-λ], 1)
self.λ = unsqueeze(λ.max(1)[0], (1,2,3))
shuffle = torch.randperm(self.yb.size(0)).to(self.xb.device)
xb1,self.yb1 = self.xb[shuffle],self.yb[shuffle]
self.run.xb = lin_comb(self.xb, xb1, self.λ)
def after_fit(self): self.run.loss_func = self.old_loss_func
def loss_func(self, pred, yb):
if not self.in_train: return self.old_loss_func(pred, yb)
with NoneReduce(self.old_loss_func) as loss_func:
loss1 = loss_func(pred, yb)
loss2 = loss_func(pred, self.yb1)
loss = lin_comb(loss1, loss2, self.λ)
return reduce_loss(loss, getattr(self.old_loss_func, 'reduction', 'mean'))
nfs = [32,64,128,256,512]
def get_learner(nfs, data, lr, layer, loss_func=F.cross_entropy,
cb_funcs=None, opt_func=optim.SGD, **kwargs):
model = get_cnn_model(data, nfs, layer, **kwargs)
init_cnn(model)
return Learner(model, data, loss_func, lr=lr, cb_funcs=cb_funcs, opt_func=opt_func)
cbfs = [partial(AvgStatsCallback,accuracy),
CudaCallback,
ProgressCallback,
partial(BatchTransformXCallback, norm_imagenette),
MixUp]
learn = get_learner(nfs, data, 0.4, conv_layer, cb_funcs=cbfs)
learn.fit(1)
#export
class LabelSmoothingCrossEntropy(nn.Module):
def __init__(self, ε:float=0.1, reduction='mean'):
super().__init__()
self.ε,self.reduction = ε,reduction
def forward(self, output, target):
c = output.size()[-1]
log_preds = F.log_softmax(output, dim=-1)
loss = reduce_loss(-log_preds.sum(dim=-1), self.reduction)
nll = F.nll_loss(log_preds, target, reduction=self.reduction)
return lin_comb(loss/c, nll, self.ε)
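# Sanity check (my addition): the class above should match plain cross-entropy
# against manually smoothed targets q = ε/c + (1-ε)*one_hot(y), assuming the
# course's lin_comb(v1, v2, β) = β*v1 + (1-β)*v2.
logits = torch.randn(4, 5)
y = torch.tensor([0, 2, 1, 4])
ε, c = 0.1, 5
q = torch.full((4, c), ε / c)
q[torch.arange(4), y] += 1 - ε
manual = -(q * F.log_softmax(logits, dim=-1)).sum(dim=-1).mean()
print(manual, LabelSmoothingCrossEntropy(ε)(logits, y))  # should agree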
cbfs = [partial(AvgStatsCallback,accuracy),
CudaCallback,
ProgressCallback,
partial(BatchTransformXCallback, norm_imagenette)]
learn = get_learner(nfs, data, 0.4, conv_layer, cb_funcs=cbfs, loss_func=LabelSmoothingCrossEntropy())
learn.fit(1)
assert learn.loss_func.reduction == 'mean'
!./notebook2script.py 10b_mixup_label_smoothing.ipynb
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Mixup
Step2: French horn or tench? The right answer is 70% french horn and 30% tench ;)
Step3: NB
Step4: In the original article, the authors suggested three things
Step5: With a low α, we pick values close to 0. and 1. with a high probability, and the values in the middle all have the same kind of probability. With a greater α, 0. and 1. get a lower probability .
Step6: Then we can use it in MixUp
Step7: Questions
Step8: Note
Step9: And we can check our loss function reduction attribute hasn't changed outside of the training loop
Step10: Export
|
4,337
|
<ASSISTANT_TASK:>
Python Code:
data_in_shape = (4, 4, 4, 2)
L = MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(290)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.MaxPooling3D.0'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 4, 4, 2)
L = MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='valid', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(291)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.MaxPooling3D.1'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 5, 2, 3)
L = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 1, 1), padding='valid', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(282)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.MaxPooling3D.2'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 4, 4, 2)
L = MaxPooling3D(pool_size=(3, 3, 3), strides=None, padding='valid', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(283)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.MaxPooling3D.3'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 4, 4, 2)
L = MaxPooling3D(pool_size=(3, 3, 3), strides=(3, 3, 3), padding='valid', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(284)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.MaxPooling3D.4'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 4, 4, 2)
L = MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='same', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(285)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.MaxPooling3D.5'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 4, 4, 2)
L = MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(286)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.MaxPooling3D.6'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 5, 4, 2)
L = MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 2, 1), padding='same', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(287)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.MaxPooling3D.7'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 4, 4, 2)
L = MaxPooling3D(pool_size=(3, 3, 3), strides=None, padding='same', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(288)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.MaxPooling3D.8'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 4, 4, 2)
L = MaxPooling3D(pool_size=(3, 3, 3), strides=(3, 3, 3), padding='same', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(289)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.MaxPooling3D.9'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (2, 3, 3, 4)
L = MaxPooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_first')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(290)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.MaxPooling3D.10'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (2, 3, 3, 4)
L = MaxPooling3D(pool_size=(3, 3, 3), strides=(1, 1, 1), padding='same', data_format='channels_first')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(291)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.MaxPooling3D.11'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (3, 4, 4, 3)
L = MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format='channels_first')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(292)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.MaxPooling3D.12'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
print(json.dumps(DATA))
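# For reference (my addition, not part of the generated fixtures): the expected
# output length of each pooled dimension follows the standard Keras rules.
import math
def pooled_size(n, pool, stride, padding):
    if padding == 'valid':
        return (n - pool) // stride + 1
    return math.ceil(n / stride)  # 'same' padding
print([pooled_size(4, 2, 2, 'valid')] * 3)  # first fixture: 4x4x4 -> [2, 2, 2]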
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: [pooling.MaxPooling3D.1] input 4x4x4x2, pool_size=(2, 2, 2), strides=(1, 1, 1), padding='valid', data_format='channels_last'
Step2: [pooling.MaxPooling3D.2] input 4x5x2x3, pool_size=(2, 2, 2), strides=(2, 1, 1), padding='valid', data_format='channels_last'
Step3: [pooling.MaxPooling3D.3] input 4x4x4x2, pool_size=(3, 3, 3), strides=None, padding='valid', data_format='channels_last'
Step4: [pooling.MaxPooling3D.4] input 4x4x4x2, pool_size=(3, 3, 3), strides=(3, 3, 3), padding='valid', data_format='channels_last'
Step5: [pooling.MaxPooling3D.5] input 4x4x4x2, pool_size=(2, 2, 2), strides=None, padding='same', data_format='channels_last'
Step6: [pooling.MaxPooling3D.6] input 4x4x4x2, pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same', data_format='channels_last'
Step7: [pooling.MaxPooling3D.7] input 4x5x4x2, pool_size=(2, 2, 2), strides=(1, 2, 1), padding='same', data_format='channels_last'
Step8: [pooling.MaxPooling3D.8] input 4x4x4x2, pool_size=(3, 3, 3), strides=None, padding='same', data_format='channels_last'
Step9: [pooling.MaxPooling3D.9] input 4x4x4x2, pool_size=(3, 3, 3), strides=(3, 3, 3), padding='same', data_format='channels_last'
Step10: [pooling.MaxPooling3D.10] input 2x3x3x4, pool_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_first'
Step11: [pooling.MaxPooling3D.11] input 2x3x3x4, pool_size=(3, 3, 3), strides=(1, 1, 1), padding='same', data_format='channels_first'
Step12: [pooling.MaxPooling3D.12] input 3x4x4x3, pool_size=(2, 2, 2), strides=None, padding='valid', data_format='channels_first'
Step13: export for Keras.js tests
|
4,338
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import skrf as rf
rf.stylely()
# imports
from scipy.constants import mil,c
from skrf.media import RectangularWaveguide, Freespace
from skrf.frequency import Frequency
import matplotlib as mpl
# plot formating
mpl.rcParams['lines.linewidth'] = 2
# create frequency objects for standard bands
f_wr5p1 = Frequency(140,220,1001, 'ghz')
f_wr3p4 = Frequency(220,330,1001, 'ghz')
f_wr2p2 = Frequency(330,500,1001, 'ghz')
f_wr1p5 = Frequency(500,750,1001, 'ghz')
f_wr1 = Frequency(750,1100,1001, 'ghz')
# create rectangular waveguide objects
wr5p1 = RectangularWaveguide(f_wr5p1.copy(), a=51*mil, b=25.5*mil, rho = 'au')
wr3p4 = RectangularWaveguide(f_wr3p4.copy(), a=34*mil, b=17*mil, rho = 'au')
wr2p2 = RectangularWaveguide(f_wr2p2.copy(), a=22*mil, b=11*mil, rho = 'au')
wr1p5 = RectangularWaveguide(f_wr1p5.copy(), a=15*mil, b=7.5*mil, rho = 'au')
wr1 = RectangularWaveguide(f_wr1.copy(), a=10*mil, b=5*mil, rho = 'au')
# add names to waveguide objects for use in plot legends
wr5p1.name = 'WR-5.1'
wr3p4.name = 'WR-3.4'
wr2p2.name = 'WR-2.2'
wr1p5.name = 'WR-1.5'
wr1.name = 'WR-1.0'
# create a list to iterate through
wg_list = [wr5p1, wr3p4,wr2p2,wr1p5,wr1]
# creat a freespace object too
freespace = Freespace(Frequency(125,1100, 1001))
freespace.name = 'Free Space'
from pylab import *
for wg in wg_list:
wg.frequency.plot(rf.np_2_db(wg.alpha), label=wg.name )
legend()
xlabel('Frequency(GHz)')
ylabel('Loss (dB/m)')
title('Loss in Rectangular Waveguide (Au)');
xlim(100,1300)
resistivity_list = linspace(1,10,5)*1e-8 # ohm meter
for rho in resistivity_list:
wg = RectangularWaveguide(f_wr1.copy(), a=10*mil, b=5*mil,
rho = rho)
wg.frequency.plot(rf.np_2_db(wg.alpha),label=r'$ \rho $=%.e$ \Omega m$'%rho )
legend()
#ylim(.0,20)
xlabel('Frequency(GHz)')
ylabel('Loss (dB/m)')
title('Loss vs. Resistivity in\nWR-1.0 Rectangular Waveguide');
for wg in wg_list:
wg.frequency.plot(100*wg.v_p.real/c, label=wg.name )
legend()
ylim(50,200)
xlabel('Frequency(GHz)')
ylabel('Phase Velocity (\%c)')
title('Phase Velocity in Rectangular Waveguide');
for wg in wg_list:
plt.plot(wg.frequency.f_scaled[1:],
100/c*diff(wg.frequency.w)/diff(wg.beta),
label=wg.name )
legend()
ylim(50,100)
xlabel('Frequency(GHz)')
ylabel('Group Velocity (\%c)')
title('Group Velocity in Rectangular Waveguide');
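# Cross-check (my addition): for the TE10 mode of an air-filled, lossless guide
# the analytic group velocity is v_g = c*sqrt(1 - (f_c/f)**2) with f_c = c/(2a);
# this assumes skrf's RectangularWaveguide exposes the broad-wall size as `wg.a`.
figure()
for wg in wg_list:
    fc = c / (2 * wg.a)
    plot(wg.frequency.f_scaled, 100 * sqrt(1 - (fc / wg.frequency.f)**2),
         label=wg.name + ' (analytic)')
legend()
xlabel('Frequency(GHz)')
ylabel('Group Velocity (%c)')
title('Analytic TE10 Group Velocity');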
for wg in wg_list+[freespace]:
wg.frequency.plot(wg.beta, label=wg.name )
legend()
xlabel('Frequency(GHz)')
ylabel('Propagation Constant (rad/m)')
title('Propagation Constant \nin Rectangular Waveguide');
semilogy();
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Conductor Loss
Step2: Phase Velocity
Step3: Propagation Constant
|
4,339
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from emo_utils import *
import emoji
import matplotlib.pyplot as plt
%matplotlib inline
X_train, Y_train = read_csv('data/train_emoji.csv')
X_test, Y_test = read_csv('data/tesss.csv')
maxLen = len(max(X_train, key=len).split())
index = 1
print(X_train[index], label_to_emoji(Y_train[index]))
Y_oh_train = convert_to_one_hot(Y_train, C = 5)
Y_oh_test = convert_to_one_hot(Y_test, C = 5)
index = 50
print(Y_train[index], "is converted into one hot", Y_oh_train[index])
word_to_index, index_to_word, word_to_vec_map = read_glove_vecs('data/glove.6B.50d.txt')
word = "cucumber"
index = 289846
print("the index of", word, "in the vocabulary is", word_to_index[word])
print("the", str(index) + "th word in the vocabulary is", index_to_word[index])
# GRADED FUNCTION: sentence_to_avg
def sentence_to_avg(sentence, word_to_vec_map):
"""
Converts a sentence (string) into a list of words (strings). Extracts the GloVe representation of each word
and averages its value into a single vector encoding the meaning of the sentence.
Arguments:
sentence -- string, one training example from X
word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation
Returns:
avg -- average vector encoding information about the sentence, numpy-array of shape (50,)
"""
### START CODE HERE ###
# Step 1: Split sentence into list of lower case words (≈ 1 line)
words = sentence.lower().split()
# Initialize the average word vector, should have the same shape as your word vectors.
avg = np.zeros((50, ))
# Step 2: average the word vectors. You can loop over the words in the list "words".
for w in words:
avg += word_to_vec_map[w]
avg = avg / len(words)
### END CODE HERE ###
return avg
avg = sentence_to_avg("Morrocan couscous is my favorite dish", word_to_vec_map)
print("avg = ", avg)
# GRADED FUNCTION: model
def model(X, Y, word_to_vec_map, learning_rate = 0.01, num_iterations = 400):
"""
Model to train word vector representations in numpy.
Arguments:
X -- input data, numpy array of sentences as strings, of shape (m, 1)
Y -- labels, numpy array of integers between 0 and 7, numpy-array of shape (m, 1)
word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation
learning_rate -- learning_rate for the stochastic gradient descent algorithm
num_iterations -- number of iterations
Returns:
pred -- vector of predictions, numpy-array of shape (m, 1)
W -- weight matrix of the softmax layer, of shape (n_y, n_h)
b -- bias of the softmax layer, of shape (n_y,)
"""
np.random.seed(1)
# Define number of training examples
m = Y.shape[0] # number of training examples
n_y = 5 # number of classes
n_h = 50 # dimensions of the GloVe vectors
# Initialize parameters using Xavier initialization
W = np.random.randn(n_y, n_h) / np.sqrt(n_h)
b = np.zeros((n_y,))
# Convert Y to Y_onehot with n_y classes
Y_oh = convert_to_one_hot(Y, C = n_y)
# Optimization loop
for t in range(num_iterations): # Loop over the number of iterations
for i in range(m): # Loop over the training examples
### START CODE HERE ### (≈ 4 lines of code)
# Average the word vectors of the words from the i'th training example
avg = sentence_to_avg(X[i], word_to_vec_map)
# Forward propagate the avg through the softmax layer
z = np.dot(W, avg) + b
a = softmax(z)
# Compute cost using the i'th training label's one hot representation and "A" (the output of the softmax)
cost = - np.dot(Y_oh[i], np.log(a))
### END CODE HERE ###
# Compute gradients
dz = a - Y_oh[i]
dW = np.dot(dz.reshape(n_y,1), avg.reshape(1, n_h))
db = dz
# Update parameters with Stochastic Gradient Descent
W = W - learning_rate * dW
b = b - learning_rate * db
if t % 100 == 0:
print("Epoch: " + str(t) + " --- cost = " + str(cost))
pred = predict(X, Y, W, b, word_to_vec_map)
return pred, W, b
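# Note on the update above (my annotation): with softmax + cross-entropy the
# error signal collapses to dz = a - y_onehot; since z = W.dot(avg) + b, the
# chain rule gives dW = outer(dz, avg) and db = dz, which is exactly what the
# dW/db lines in the loop compute.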
print(X_train.shape)
print(Y_train.shape)
print(np.eye(5)[Y_train.reshape(-1)].shape)
print(X_train[0])
print(type(X_train))
Y = np.asarray([5,0,0,5, 4, 4, 4, 6, 6, 4, 1, 1, 5, 6, 6, 3, 6, 3, 4, 4])
print(Y.shape)
X = np.asarray(['I am going to the bar tonight', 'I love you', 'miss you my dear',
'Lets go party and drinks','Congrats on the new job','Congratulations',
'I am so happy for you', 'Why are you feeling bad', 'What is wrong with you',
'You totally deserve this prize', 'Let us go play football',
'Are you down for football this afternoon', 'Work hard play harder',
'It is suprising how people can be dumb sometimes',
'I am very disappointed','It is the best day in my life',
'I think I will end up alone','My life is so boring','Good job',
'Great so awesome'])
print(X.shape)
print(np.eye(5)[Y_train.reshape(-1)].shape)
print(type(X_train))
pred, W, b = model(X_train, Y_train, word_to_vec_map)
print(pred)
print("Training set:")
pred_train = predict(X_train, Y_train, W, b, word_to_vec_map)
print('Test set:')
pred_test = predict(X_test, Y_test, W, b, word_to_vec_map)
X_my_sentences = np.array(["i adore you", "i love you", "funny lol", "lets play with a ball", "food is ready", "not feeling happy"])
Y_my_labels = np.array([[0], [0], [2], [1], [4],[3]])
pred = predict(X_my_sentences, Y_my_labels , W, b, word_to_vec_map)
print_predictions(X_my_sentences, pred)
print(Y_test.shape)
print(' '+ label_to_emoji(0)+ ' ' + label_to_emoji(1) + ' ' + label_to_emoji(2)+ ' ' + label_to_emoji(3)+' ' + label_to_emoji(4))
print(pd.crosstab(Y_test, pred_test.reshape(56,), rownames=['Actual'], colnames=['Predicted'], margins=True))
plot_confusion_matrix(Y_test, pred_test)
import numpy as np
np.random.seed(0)
from keras.models import Model
from keras.layers import Dense, Input, Dropout, LSTM, Activation
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.initializers import glorot_uniform
np.random.seed(1)
# GRADED FUNCTION: sentences_to_indices
def sentences_to_indices(X, word_to_index, max_len):
"""
Converts an array of sentences (strings) into an array of indices corresponding to words in the sentences.
The output shape should be such that it can be given to `Embedding()` (described in Figure 4).
Arguments:
X -- array of sentences (strings), of shape (m, 1)
word_to_index -- a dictionary containing each word mapped to its index
max_len -- maximum number of words in a sentence. You can assume every sentence in X is no longer than this.
Returns:
X_indices -- array of indices corresponding to words in the sentences from X, of shape (m, max_len)
"""
m = X.shape[0] # number of training examples
### START CODE HERE ###
# Initialize X_indices as a numpy matrix of zeros and the correct shape (≈ 1 line)
X_indices = np.zeros((m, max_len))
for i in range(m): # loop over training examples
# Convert the ith training sentence in lower case and split is into words. You should get a list of words.
sentence_words = X[i].lower().split()
# Initialize j to 0
j = 0
# Loop over the words of sentence_words
for w in sentence_words:
# Set the (i,j)th entry of X_indices to the index of the correct word.
X_indices[i, j] = word_to_index[w]
# Increment j to j + 1
j = j + 1
### END CODE HERE ###
return X_indices
X1 = np.array(["funny lol", "lets play baseball", "food is ready for you"])
X1_indices = sentences_to_indices(X1,word_to_index, max_len = 5)
print("X1 =", X1)
print("X1_indices =", X1_indices)
# GRADED FUNCTION: pretrained_embedding_layer
def pretrained_embedding_layer(word_to_vec_map, word_to_index):
"""
Creates a Keras Embedding() layer and loads in pre-trained GloVe 50-dimensional vectors.
Arguments:
word_to_vec_map -- dictionary mapping words to their GloVe vector representation.
word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)
Returns:
embedding_layer -- pretrained layer Keras instance
"""
vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)
emb_dim = word_to_vec_map["cucumber"].shape[0] # define dimensionality of your GloVe word vectors (= 50)
### START CODE HERE ###
# Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors = emb_dim)
emb_matrix = np.zeros((vocab_len, emb_dim))
# Set each row "index" of the embedding matrix to be the word vector representation of the "index"th word of the vocabulary
for word, index in word_to_index.items():
emb_matrix[index, :] = word_to_vec_map[word]
# Define Keras embedding layer with the correct output/input sizes, make it trainable. Use Embedding(...). Make sure to set trainable=False.
embedding_layer = Embedding(vocab_len, emb_dim, trainable = False)
### END CODE HERE ###
# Build the embedding layer, it is required before setting the weights of the embedding layer. Do not modify the "None".
embedding_layer.build((None,))
# Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.
embedding_layer.set_weights([emb_matrix])
return embedding_layer
embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
print("weights[0][1][3] =", embedding_layer.get_weights()[0][1][3])
# GRADED FUNCTION: Emojify_V2
def Emojify_V2(input_shape, word_to_vec_map, word_to_index):
"""
Function creating the Emojify-v2 model's graph.
Arguments:
input_shape -- shape of the input, usually (max_len,)
word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation
word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)
Returns:
model -- a model instance in Keras
"""
### START CODE HERE ###
# Define sentence_indices as the input of the graph, it should be of shape input_shape and dtype 'int32' (as it contains indices).
sentence_indices = Input(shape = input_shape, dtype = 'int32')
# Create the embedding layer pretrained with GloVe Vectors (≈1 line)
embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
# Propagate sentence_indices through your embedding layer, you get back the embeddings
embeddings = embedding_layer(sentence_indices)
# Propagate the embeddings through an LSTM layer with 128-dimensional hidden state
# Be careful, the returned output should be a batch of sequences.
X = LSTM(128, return_sequences = True)(embeddings)
# Add dropout with a probability of 0.5
X = Dropout(0.5)(X)
# Propagate X trough another LSTM layer with 128-dimensional hidden state
# Be careful, the returned output should be a single hidden state, not a batch of sequences.
X = LSTM(128, return_sequences = False)(X)
# Add dropout with a probability of 0.5
X = Dropout(0.5)(X)
# Propagate X through a Dense layer with 5 units to get back a batch of 5-dimensional vectors
# (softmax is applied by the Activation layer below; putting it on the Dense layer too would apply it twice)
X = Dense(5)(X)
# Add a softmax activation
X = Activation(activation='softmax')(X)
# Create Model instance which converts sentence_indices into X.
model = Model(inputs=sentence_indices, outputs=X)
### END CODE HERE ###
return model
model = Emojify_V2((maxLen,), word_to_vec_map, word_to_index)
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
X_train_indices = sentences_to_indices(X_train, word_to_index, maxLen)
Y_train_oh = convert_to_one_hot(Y_train, C = 5)
model.fit(X_train_indices, Y_train_oh, epochs = 50, batch_size = 32, shuffle=True)
X_test_indices = sentences_to_indices(X_test, word_to_index, max_len = maxLen)
Y_test_oh = convert_to_one_hot(Y_test, C = 5)
loss, acc = model.evaluate(X_test_indices, Y_test_oh)
print()
print("Test accuracy = ", acc)
# This code allows you to see the mislabelled examples
C = 5
y_test_oh = np.eye(C)[Y_test.reshape(-1)]
X_test_indices = sentences_to_indices(X_test, word_to_index, maxLen)
pred = model.predict(X_test_indices)
for i in range(len(X_test)):
x = X_test_indices
num = np.argmax(pred[i])
if(num != Y_test[i]):
print('Expected emoji:'+ label_to_emoji(Y_test[i]) + ' prediction: '+ X_test[i] + label_to_emoji(num).strip())
# Change the sentence below to see your prediction. Make sure all the words are in the Glove embeddings.
x_test = np.array(['not feeling happy'])
X_test_indices = sentences_to_indices(x_test, word_to_index, maxLen)
print(x_test[0] +' '+ label_to_emoji(np.argmax(model.predict(X_test_indices))))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1 - Baseline model
Step2: Run the following cell to print sentences from X_train and corresponding labels from Y_train. Change index to see different examples. Because of the font the iPython notebook uses, the heart emoji may be colored black rather than red.
Step3: 1.2 - Overview of the Emojifier-V1
Step4: Let's see what convert_to_one_hot() did. Feel free to change index to print out different values.
Step5: All the data is now ready to be fed into the Emojify-V1 model. Let's implement the model!
Step6: You've loaded
Step8: Exercise
Step10: Expected Output
Step11: Run the next cell to train your model and learn the softmax parameters (W,b).
Step12: Expected Output (on a subset of iterations)
Step13: Expected Output
Step14: Amazing! Because adore has a similar embedding as love, the algorithm has generalized correctly even to a word it has never seen before. Words such as heart, dear, beloved or adore have embedding vectors similar to love, and so might work too---feel free to modify the inputs above and try out a variety of input sentences. How well does it work?
Step15: <font color='blue'>
Step17: 2.1 - Overview of the model
Step18: Run the following cell to check what sentences_to_indices() does, and check your results.
Step20: Expected Output
Step22: Expected Output
Step23: Run the following cell to create your model and check its summary. Because all sentences in the dataset are less than 10 words, we chose max_len = 10. You should see your architecture, it uses "20,223,927" parameters, of which 20,000,050 (the word embeddings) are non-trainable, and the remaining 223,877 are. Because our vocabulary size has 400,001 words (with valid indices from 0 to 400,000) there are 400,001*50 = 20,000,050 non-trainable parameters.
Step24: As usual, after creating your model in Keras, you need to compile it and define what loss, optimizer and metrics your are want to use. Compile your model using categorical_crossentropy loss, adam optimizer and ['accuracy'] metrics
Step25: It's time to train your model. Your Emojifier-V2 model takes as input an array of shape (m, max_len) and outputs probability vectors of shape (m, number of classes). We thus have to convert X_train (array of sentences as strings) to X_train_indices (array of sentences as list of word indices), and Y_train (labels as indices) to Y_train_oh (labels as one-hot vectors).
Step26: Fit the Keras model on X_train_indices and Y_train_oh. We will use epochs = 50 and batch_size = 32.
Step27: Your model should perform close to 100% accuracy on the training set. The exact accuracy you get may be a little different. Run the following cell to evaluate your model on the test set.
Step28: You should get a test accuracy between 80% and 95%. Run the cell below to see the mislabelled examples.
Step29: Now you can try it on your own example. Write your own sentence below.
|
4,340
|
<ASSISTANT_TASK:>
Python Code:
import cvxpy as cp
x = cp.Variable(pos=True)
y = cp.Variable(pos=True)
z = cp.Variable(pos=True)
a = cp.Parameter(pos=True)
b = cp.Parameter(pos=True)
c = cp.Parameter()
objective_fn = 1/(x*y*z)
objective = cp.Minimize(objective_fn)
constraints = [a*(x*y + x*z + y*z) <= b, x >= y**c]
problem = cp.Problem(objective, constraints)
problem.is_dgp(dpp=True)
a.value = 2.0
b.value = 1.0
c.value = 0.5
problem.solve(gp=True, requires_grad=True)
print(x.value)
print(y.value)
print(z.value)
da, db, dc = 1e-2, 1e-2, 1e-2
a.delta = da
b.delta = db
c.delta = dc
problem.derivative()
x_hat = x.value + x.delta
y_hat = y.value + y.delta
z_hat = z.value + z.delta
a.value += da
b.value += db
c.value += dc
problem.solve(gp=True)
print('x: predicted {0:.5f} actual {1:.5f}'.format(x_hat, x.value))
print('y: predicted {0:.5f} actual {1:.5f}'.format(y_hat, y.value))
print('z: predicted {0:.5f} actual {1:.5f}'.format(z_hat, z.value))
a.value -= da
b.value -= db
c.value -= dc
problem.solve(gp=True, requires_grad=True)
def f(x, y, z):
return 1/2*(x**2 + y**2 + z**2)
original = f(x, y, z).value
x.gradient = x.value
y.gradient = y.value
z.gradient = z.value
problem.backward()
eta = 0.5
dalpha = cp.vstack([a.gradient, b.gradient, c.gradient])
predicted = float((original - eta*dalpha.T @ dalpha).value)
a.value -= eta*a.gradient
b.value -= eta*b.gradient
c.value -= eta*c.gradient
problem.solve(gp=True)
actual = f(x, y, z).value
print('original {0:.5f} predicted {1:.5f} actual {2:.5f}'.format(
original, predicted, actual))
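# Finite-difference sanity check (my addition): compare the gradient of f with
# respect to `a` from backward() against a one-sided difference. Parameter
# values are reset explicitly so the check does not depend on the cell above.
h = 1e-4
a.value, b.value, c.value = 2.0, 1.0, 0.5
problem.solve(gp=True, requires_grad=True)
f0 = f(x, y, z).value
x.gradient, y.gradient, z.gradient = x.value, y.value, z.value
problem.backward()
grad_a = float(a.gradient)
a.value += h
problem.solve(gp=True)
print('backward {0:.5f} finite-diff {1:.5f}'.format(
    grad_a, float((f(x, y, z).value - f0) / h)))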
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Notice the keyword argument dpp=True. The parameters must enter the DGP problem according to special rules, which we refer to as DPP. The DPP rules are described in an online tutorial.
Step2: Notice the keyword argument requires_grad=True; this is necessary to subsequently compute derivatives.
Step3: The derivative method populates the delta attributes of the variables as a side-effect, with the predicted change in the variable. We can compare the predictions to the actual solution of the perturbed problem.
Step4: In this case, the predictions and the actual solutions are fairly close.
|
4,341
|
<ASSISTANT_TASK:>
Python Code:
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
with open('anna.txt', 'r') as f:
text=f.read()
vocab = sorted(set(text))
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
encoded[:100]
len(vocab)
def get_batches(arr, batch_size, n_steps):
'''Create a generator that returns batches of size
batch_size x n_steps from arr.
Arguments
---------
arr: Array you want to make batches from
batch_size: Batch size, the number of sequences per batch
n_steps: Number of sequence steps per batch
'''
# Get the number of characters per batch and number of batches we can make
chars_per_batch = batch_size * n_steps
n_batches = len(arr)//chars_per_batch
# Keep only enough characters to make full batches
arr = arr[:n_batches * chars_per_batch]
# Reshape into batch_size rows
arr = arr.reshape((batch_size, -1))
for n in range(0, arr.shape[1], n_steps):
# The features
x = arr[:, n:n+n_steps]
# The targets, shifted by one
y_temp = arr[:, n+1:n+n_steps+1]
# For the very last batch, y will be one character short at the end of
# the sequences which breaks things. To get around this, I'll make an
# array of the appropriate size first, of all zeros, then add the targets.
# This will introduce a small artifact in the last batch, but it won't matter.
y = np.zeros(x.shape, dtype=x.dtype)
y[:,:y_temp.shape[1]] = y_temp
yield x, y
batches = get_batches(encoded, 10, 50)
x, y = next(batches)
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
def build_inputs(batch_size, num_steps):
''' Define placeholders for inputs, targets, and dropout
Arguments
---------
batch_size: Batch size, number of sequences per batch
num_steps: Number of sequence steps in a batch
'''
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
# Keep probability placeholder for drop out layers
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return inputs, targets, keep_prob
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
''' Build LSTM cell.
Arguments
---------
keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability
lstm_size: Size of the hidden layers in the LSTM cells
num_layers: Number of LSTM layers
batch_size: Batch size
'''
### Build the LSTM Cell
def build_cell(lstm_size, keep_prob):
# Use a basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# Add dropout to the cell
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
return drop
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([build_cell(lstm_size, keep_prob) for _ in range(num_layers)])
initial_state = cell.zero_state(batch_size, tf.float32)
return cell, initial_state
def build_output(lstm_output, in_size, out_size):
''' Build a softmax layer, return the softmax output and logits.
Arguments
---------
lstm_output: Input tensor (the collected outputs of the LSTM)
in_size: Size of the input tensor, for example, size of the LSTM cells
out_size: Size of this softmax layer
'''
# Reshape output so it's a bunch of rows, one row for each step for each sequence.
# That is, the shape should be batch_size*num_steps rows by lstm_size columns
seq_output = tf.concat(lstm_output, axis=1)
x = tf.reshape(seq_output, [-1, in_size])
# Connect the RNN outputs to a softmax layer
with tf.variable_scope('softmax'):
softmax_w = tf.Variable(tf.truncated_normal((in_size, out_size), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(out_size))
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and sequence
logits = tf.matmul(x, softmax_w) + softmax_b
# Use softmax to get the probabilities for predicted characters
out = tf.nn.softmax(logits, name='predictions')
return out, logits
def build_loss(logits, targets, lstm_size, num_classes):
''' Calculate the loss from the logits and the targets.
Arguments
---------
logits: Logits from final fully connected layer
targets: Targets for supervised learning
lstm_size: Number of LSTM hidden units
num_classes: Number of classes in targets
'''
# One-hot encode targets and reshape to match logits, one row per batch_size per step
y_one_hot = tf.one_hot(targets, num_classes)
y_reshaped = tf.reshape(y_one_hot, logits.get_shape())
# Softmax cross entropy loss
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
loss = tf.reduce_mean(loss)
return loss
def build_optimizer(loss, learning_rate, grad_clip):
''' Build optimizer for training, using gradient clipping.
Arguments:
loss: Network loss
learning_rate: Learning rate for optimizer
'''
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
return optimizer
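# What clip_by_global_norm does, in plain numpy (illustration only, my
# addition): if the combined norm of all gradients exceeds grad_clip, every
# gradient is scaled by the same factor so the global norm equals grad_clip.
def clip_global_norm(grads, clip):
    gnorm = np.sqrt(sum((g**2).sum() for g in grads))
    scale = min(1.0, clip / gnorm)
    return [g * scale for g in grads], gnorm
clipped, gnorm = clip_global_norm([np.array([3., 4.]), np.array([12.])], clip=5.)
print(gnorm, clipped)  # global norm was 13.0, rescaled so the new norm is 5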
class CharRNN:
def __init__(self, num_classes, batch_size=64, num_steps=50,
lstm_size=128, num_layers=2, learning_rate=0.001,
grad_clip=5, sampling=False):
# When we're using this network for sampling later, we'll be passing in
# one character at a time, so providing an option for that
if sampling == True:
batch_size, num_steps = 1, 1
else:
batch_size, num_steps = batch_size, num_steps
tf.reset_default_graph()
# Build the input placeholder tensors
self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
# Build the LSTM cell
cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
### Run the data through the RNN layers
# First, one-hot encode the input tokens
x_one_hot = tf.one_hot(self.inputs, num_classes)
# Run each sequence step through the RNN and collect the outputs
outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
self.final_state = state
# Get softmax predictions and logits
self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
# Loss and optimizer (with gradient clipping)
self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)
self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
batch_size = 100 # Sequences per batch
num_steps = 100 # Number of sequence steps per batch
lstm_size = 512 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.001 # Learning rate
keep_prob = 0.5 # Dropout keep probability
epochs = 20
# Print losses every N interations
print_every_n = 50
# Save every N iterations
save_every_n = 200
model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
lstm_size=lstm_size, num_layers=num_layers,
learning_rate=learning_rate)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/______.ckpt')
counter = 0
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for x, y in get_batches(encoded, batch_size, num_steps):
counter += 1
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.loss,
model.final_state,
model.optimizer],
feed_dict=feed)
if (counter % print_every_n == 0):
end = time.time()
print('Epoch: {}/{}... '.format(e+1, epochs),
'Training Step: {}... '.format(counter),
'Training loss: {:.4f}... '.format(batch_loss),
'{:.4f} sec/batch'.format((end-start)))
if (counter % save_every_n == 0):
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
tf.train.get_checkpoint_state('checkpoints')
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
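# An alternative to top-n filtering (my addition, not used elsewhere in this
# notebook): temperature sampling rescales the distribution before drawing;
# temperatures below 1 sharpen it, temperatures above 1 flatten it.
def sample_with_temperature(preds, vocab_size, temperature=0.8):
    p = np.squeeze(preds).astype(np.float64)
    logits = np.log(p + 1e-10) / temperature
    p = np.exp(logits - logits.max())
    p /= p.sum()
    return np.random.choice(vocab_size, 1, p=p)[0]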
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
samples = [c for c in prime]
model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
tf.train.latest_checkpoint('checkpoints')
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i600_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i1200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
Step2: Let's check out the first 100 characters, make sure everything is peachy. According to the American Book Review, this is the 6th best first line of a book ever.
Step3: And we can see the characters encoded as integers.
Step4: Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.
Step5: Making training mini-batches
Step6: Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
Step7: If you implemented get_batches correctly, the above output should look something like
Step8: LSTM Cell
Step9: RNN Output
Step10: Training loss
Step11: Optimizer
Step12: Build the network
Step13: Hyperparameters
Step14: Time for training
Step15: Saved checkpoints
Step16: Sampling
Step17: Here, pass in the path to a checkpoint and sample from the network.
|
4,342
|
<ASSISTANT_TASK:>
Python Code:
# dogscats/
# train/
# dogs/
# cats/
# valid/
# dogs/
# cats/
# test/
# dogs/
# cats/
# sample/
# <same as for dogscats/>
# Making sure python knows where to look for my utilities folder (it's in this notebook's parent folder).
# I know the naming can be confusing, but it's to not get in the way of the imports below
from os import path as ospath, getcwd; from sys import path as syspath
current_dir = getcwd()
syspath.insert(1, ospath.join(current_dir, '../utils'))
syspath.insert(1, ospath.join(current_dir, '..'))
# this ensures matplotlib plots within this window
%matplotlib inline
# path to my sample data directory. Contains 180 imgs in ../train/ and 20
# imgs in ../valid/ The full dataset contains 25,000 imgs.
# path = 'data/dogscats/sample/'
path = 'data/dogscats/'
# The libraries to be imported (taken from https://github.com/fastai/courses/blob/master/deeplearning1/nbs/lesson1.ipynb)
from __future__ import division, print_function
import os, json
import numpy as np
np.set_printoptions(precision=4, linewidth=100)
from matplotlib import pyplot as plt
import utils; reload(utils)
from utils import plots
import vgg16; reload(vgg16)
from vgg16 import Vgg16
vgg = Vgg16()
batches = vgg.get_batches(path+'train',batch_size=4)
imgs, labels = next(batches)
plots(imgs, titles=labels)
vgg.classes[:5]
batch_size = 32
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
# Save the weights from the first epoch
vgg.model.save_weights(path + 'ft1.hd5')
nb_epoch = 3
vgg.model.optimizer.lr = 0.01
for epoch in xrange(nb_epoch):
print "Starting Epoch %d" % epoch
vgg.fit(batches, val_batches, nb_epoch=1)
print "Completed %d fit operations" % nb_epoch
path = 'data/dogscats/'
batch_size = 32
batches, predictions = vgg.test(path+'test',batch_size=batch_size)
filenames = batches.filenames
vgg.model.load_weights(path+'ft1.hd5')
# ids = np.array([int(f[f.find('.')+1:-4]) for f in filenames])
ids = np.array([int(f[8:f.find('.')]) for f in filenames])
dog_predictions = predictions[:,1]
# NOTE: this is important due to logloss error: clip the probabilities
dog_predictions = dog_predictions.clip(min=0.05, max=0.95)
# dog_predictions = np.clip(dog_predictions, 0.05, 0.95) # in case .clip doesn't work
# you know what's funny, as I'm writing this I had no clue where the clip function was coming from..
# answer: it's the NumPy ndarray method (numpy.ndarray.clip)
submission = np.stack([ids, dog_predictions], axis=1)
submission_file_name = 'submission_HW1_.csv'
np.savetxt(submission_file_name, submission, fmt='%d,%.5f',header='id,label',comments='')
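# Why clip? (my note) Kaggle scores this competition with log loss, where one
# confidently wrong answer (p close to 0 or 1) is punished extremely hard.
# Clipping to [0.05, 0.95] bounds the worst case per image:
print('worst-case per-image loss, clipped: %.3f' % -np.log(0.05))        # ~3.00
print('unclipped at p=0.9999 (wrong class): %.3f' % -np.log(1 - 0.9999)) # ~9.21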
%pwd
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup matplotlib to plot inside the notebook, import NumPy, SciPy, VGG, and the appropriate Keras libraries.
Step2: Vgg16 Image Recognition Example
Step3: Get some batches from the sample training folder. vgg.get_batches(..) is an iterator that returns a batch of images and their labels, indefinitely.
Step4: To get an idea of the categories Vgg16 usually works with, here're the first 5
Step5: Vgg16 finetuned for Dogs vs Cats
Step6: Calling fineunte() modifies the model so that it'll be trained based on the data in the batches provided
Step7: More training
Step8: The model is ready to classify dogs vs cats. In order to complete this assignment, we'll have to create a csv (comma-separated-values) file of IDs and labels of the full 25,000 set of images and upload it to Kaggle for scoring.
Step9: vgg.test(..) will return the entire set of batches & predictions (labels). We'll extract the ID-number from batches.filenames, and the label from the second column of predictions.
Step10: The syntax for getting the IDs below is
|
4,343
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import os
import tempfile
import pandas as pd
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow_transform import beam as tft_beam
import tfx_utils
from tfx.utils import io_utils
from tensorflow_metadata.proto.v0 import schema_pb2
# For DatasetMetadata boilerplate
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import schema_utils
tf.get_logger().propagate = False
def _make_default_sqlite_uri(pipeline_name):
return os.path.join(os.environ['HOME'], 'airflow/tfx/metadata', pipeline_name, 'metadata.db')
def get_metadata_store(pipeline_name):
return tfx_utils.TFXReadonlyMetadataStore.from_sqlite_db(_make_default_sqlite_uri(pipeline_name))
pipeline_name = 'taxi'
pipeline_db_path = _make_default_sqlite_uri(pipeline_name)
print('Pipeline DB:\n{}'.format(pipeline_db_path))
store = get_metadata_store(pipeline_name)
# Get the schema URI from the metadata store
schemas = store.get_artifacts_of_type_df(tfx_utils.TFXArtifactTypes.SCHEMA)
assert len(schemas.URI) == 1
schema_uri = schemas.URI.iloc[0] + 'schema.pbtxt'
print ('Schema URI:\n{}'.format(schema_uri))
schema_proto = io_utils.parse_pbtxt_file(file_name=schema_uri, message=schema_pb2.Schema())
feature_spec, domains = schema_utils.schema_as_feature_spec(schema_proto)
legacy_metadata = dataset_metadata.DatasetMetadata(dataset_schema.from_feature_spec(feature_spec, domains))
# Categorical features are assumed to each have a maximum value in the dataset.
MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
CATEGORICAL_FEATURE_KEYS = [
'trip_start_hour', 'trip_start_day', 'trip_start_month',
'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
'dropoff_community_area'
]
DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
FEATURE_BUCKET_COUNT = 10
BUCKET_FEATURE_KEYS = [
'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
OOV_SIZE = 10
VOCAB_FEATURE_KEYS = [
'payment_type',
'company',
]
# Keys
LABEL_KEY = 'tips'
FARE_KEY = 'fare'
def transformed_name(key):
return key + '_xf'
def _transformed_names(keys):
return [transformed_name(key) for key in keys]
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
return schema_utils.schema_as_feature_spec(schema).feature_spec
def _gzip_reader_fn(filenames):
  """Small utility returning a record reader that can read gzip'ed files."""
return tf.data.TFRecordDataset(
filenames,
compression_type='GZIP')
def _fill_in_missing(x):
  """Replace missing values in a SparseTensor.

  Fills in missing values of `x` with '' or 0, and converts to a dense tensor.

  Args:
    x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
      in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
default_value = '' if x.dtype == tf.string else 0
return tf.squeeze(
tf.sparse.to_dense(
tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
default_value),
axis=1)
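# Minimal usage sketch of _fill_in_missing on a toy SparseTensor (hedged:
# assumes TF2 eager execution; the example values are our own). Row 1 has no
# entry, so it is filled with the dtype default (0.0) and squeezed to rank 1.
example_sparse = tf.SparseTensor(indices=[[0, 0], [2, 0]], values=[1.0, 3.0],
                                 dense_shape=[3, 1])
print(_fill_in_missing(example_sparse))  # -> [1. 0. 3.]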
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
outputs = {}
for key in DENSE_FLOAT_FEATURE_KEYS:
# Preserve this feature as a dense float, setting nan's to the mean.
outputs[transformed_name(key)] = tft.scale_to_z_score(
_fill_in_missing(inputs[key]))
for key in VOCAB_FEATURE_KEYS:
# Build a vocabulary for this feature.
outputs[transformed_name(key)] = tft.compute_and_apply_vocabulary(
_fill_in_missing(inputs[key]),
top_k=VOCAB_SIZE,
num_oov_buckets=OOV_SIZE)
for key in BUCKET_FEATURE_KEYS:
outputs[transformed_name(key)] = tft.bucketize(
_fill_in_missing(inputs[key]), FEATURE_BUCKET_COUNT,
always_return_num_quantiles=False)
for key in CATEGORICAL_FEATURE_KEYS:
outputs[transformed_name(key)] = _fill_in_missing(inputs[key])
# Was this passenger a big tipper?
taxi_fare = _fill_in_missing(inputs[FARE_KEY])
tips = _fill_in_missing(inputs[LABEL_KEY])
outputs[transformed_name(LABEL_KEY)] = tf.where(
tf.math.is_nan(taxi_fare),
tf.cast(tf.zeros_like(taxi_fare), tf.int64),
# Test if the tip was > 20% of the fare.
tf.cast(
tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
return outputs
from IPython.display import display
with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
raw_examples = [
{
"fare": [100.0],
"trip_start_hour": [12],
"pickup_census_tract": ['abcd'],
"dropoff_census_tract": [12345.0], # No idea why this is a float
"company": ['taxi inc.'],
"trip_start_timestamp": [123456],
"pickup_longitude": [12.0],
"trip_start_month": [5],
"trip_miles": [8.0],
"dropoff_longitude": [12.05],
"dropoff_community_area": [123],
"pickup_community_area": [123],
"payment_type": ['visa'],
"trip_seconds": [600.0],
"trip_start_day": [12],
"tips": [10.0],
"pickup_latitude": [80.0],
"dropoff_latitude": [80.01],
}
]
(transformed_examples, transformed_metadata), transform_fn = (
(raw_examples, legacy_metadata)
| 'AnalyzeAndTransform' >> tft_beam.AnalyzeAndTransformDataset(
preprocessing_fn))
display(pd.DataFrame(transformed_examples))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get the schema URI from the metadata store
Step2: Get the schema that was inferred by TensorFlow Data Validation
Step6: Define features and create functions for TensorFlow Transform
Step7: Display the results of transforming some example data
|
4,344
|
<ASSISTANT_TASK:>
Python Code:
## Set up the environment
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
%matplotlib inline
## Load the data set
titanic_df = pd.read_csv('titanic_data.csv')
## Let's see what the data looks like
## First let's see what the columns are.
titanic_df.info()
## Let's look at the first few rows of data.
titanic_df.head(10)
## Let's look at some basic statistics for the data.
titanic_df.describe()
# Please note that I copied and pasted the code below from the above link
# Compute the correlation matrix
corr = titanic_df.corr()
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the correct aspect ratio (no mask is applied here)
sns.heatmap(corr, mask=None, cmap=cmap, vmax=.3,
square=True, xticklabels=True, yticklabels=True,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
# Let's start by counting how many females and males we have.
print titanic_df['Sex'].value_counts()
# Let's also look at a bar graph for comparison.
mf_plot = sns.factorplot('Sex', data=titanic_df, kind='count')
mf_plot.set_ylabels("Number of Passengers")
# Create a bar chart breaking down the passengers by gender and class
mf_class_plot= sns.factorplot('Pclass',order=[1,2,3], data=titanic_df, hue='Sex', kind='count')
mf_class_plot.set_xlabels("Passenger Class")
mf_class_plot.set_ylabels("Number of Passengers")
# Spread of passenger ages
print "Youngest Passenger Age: " + str(titanic_df['Age'].min())
print "Oldest Passenger Age: " + str(titanic_df['Age'].max())
print "Mean Passenger Age: " + str(titanic_df['Age'].mean())
print "Median Passenger Age: " + str(titanic_df['Age'].median())
# Histogram of ages
age_hist = titanic_df['Age'].hist(bins=80)
age_hist.set(xlabel="Age", ylabel="Number of Passengers")
# Determine the median age for males and for females
print "Median age for males: " + str(titanic_df[titanic_df.Sex == 'male']['Age'].median())
print "Median age for females: " + str(titanic_df[titanic_df.Sex == 'female']['Age'].median())
# Use the above value to impute the missing ages.
# If the age is missing replace `NaN` with 28.0.
# Otherwise, keep the age that is already there.
imputed_age = np.where(titanic_df['Age'].isnull(), 28.0, titanic_df['Age'])
# Replace the `NaN`s with 28.0 otherwise don't replace the age.
titanic_df['Age'] = imputed_age
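# Hedged sanity check: the imputation above should leave no missing ages.
print titanic_df['Age'].isnull().sum() # expect 0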
titanic_df.describe()
# Define a function that determines whether a passenger is an adult or a child.
# If a passenger is an adult, then that passenger is labeled according to `Sex`.
# Note that if the age is missing (entered as 'NaN' in the Age column), then the passenger will be considered an adult.
def adult_or_child(passenger):
age,sex = passenger
if age <= 12:
return 'child'
else:
return sex
# Add a column called 'Majority' to the data set indicating whether each passenger is an adult or a child.
titanic_df['Majority'] = titanic_df[['Age','Sex']].apply(adult_or_child, axis=1)
# Let's see what our numbers look like now.
titanic_df['Majority'].value_counts()
# Create a bar chart breaking down the passengers by gender and class.
majority_class_plot= sns.factorplot('Pclass',order=[1,2,3], data=titanic_df, hue='Majority', kind='count')
majority_class_plot.set_xlabels("Passenger Class")
majority_class_plot.set_ylabels("Number of Passengers")
# Define a function that determines whether a passenger is traveling with a family or alone.
def family_or_alone(passenger):
sibsp,parch = passenger
if sibsp + parch > 0:
return 'family'
else:
return 'alone'
# Add a column called 'FamAlone' to the data set indicating whether each passenger traveled with a family or alone.
titanic_df['FamAlone'] = titanic_df[['SibSp','Parch']].apply(family_or_alone, axis=1)
# Let's see what the numbers for family affiliation look like.
titanic_df['FamAlone'].value_counts()
# Create a bar chart breaking down the passengers by family affiliation.
family_plot= sns.factorplot('Pclass',order=[1,2,3], data=titanic_df, hue='FamAlone', kind='count')
family_plot.set_xlabels("Passenger Class")
family_plot.set_ylabels("Number of Passengers")
# Create a new column `Survivor` which translates the values of `Survived' as follows: 0 becomes 'No' and 1 becomes 'Yes'
titanic_df['Survivor']= titanic_df.Survived.map({0:'No', 1:'Yes'})
# Calculate the proportions of survivors and fatalities.
# Note that normalize = False just returns the nubers rather than the proportions.
titanic_df['Survivor'].value_counts(normalize = True)
# Plot the numbers of those who survived and didn't.
survival_plot = sns.factorplot('Survivor', data=titanic_df, kind='count')
survival_plot.set_xlabels("Survived")
survival_plot.set_ylabels("Number of Passengers")
# Calculate the proportions of survivors by gender.
gender_survival = titanic_df.groupby(['Sex'])['Survivor']
gender_survival.value_counts(normalize = True)
# Graph the above results.
gender_survival_plot = sns.factorplot("Sex", "Survived", data=titanic_df, kind="bar")
# Calculate the proportions of survivors by class.
class_survival = titanic_df.groupby(['Pclass'])['Survivor']
class_survival.value_counts(normalize = True)
# Graph the above results.
class_survival_plot = sns.factorplot("Pclass", "Survived", data=titanic_df, kind="bar")
# Calculate the proportions of survivors by class and gender.
class_gender_survival = titanic_df.groupby(['Pclass','Sex'])['Survivor']
class_gender_survival.value_counts(normalize = True)
# Graph the results from above
sns.factorplot("Pclass", "Survived",order=[1,2,3],data=titanic_df,hue='Sex', kind='bar')
# Calculate the proportions of survivors by family affiliation.
class_survival = titanic_df.groupby(['FamAlone'])['Survivor']
class_survival.value_counts(normalize = True)
# Graph the results from above.
class_survival_plot = sns.factorplot("FamAlone", "Survived", data=titanic_df, kind="bar")
# Calculate the proportions of survivors by class and familial affiliation.
class_family_survival = titanic_df.groupby(['Pclass','FamAlone'])['Survivor']
class_family_survival.value_counts(normalize = True)
# Graph the results from above
sns.factorplot("Pclass", "Survived",order=[1,2,3],data=titanic_df,hue='FamAlone', kind='bar')
# Graph the box plots for the ages.
age_survival_box_plot = sns.factorplot("Survived", "Age", data=titanic_df, kind="box", size=6)
age_survival_box_plot.set( xlabel='', xticklabels = ['Died', 'Survived'])
# Calculate the proportions of survivors by class and familial affiliation.
class_family_survival = titanic_df.groupby(['Pclass','Majority'])['Survivor']
class_family_survival.value_counts(normalize = True)
# Graph the box plots for the ages by class.
majority_survival_box_plot = sns.factorplot("Survived", "Age", data=titanic_df, kind="box", size=8, hue='Pclass')
majority_survival_box_plot.set( xlabel='', xticklabels = ['Died', 'Survived'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initial Inspection of the Data
Step2: An initial inspection of the data shows us that we have data for 891 passengers not all of whose ages are known. Passengers in 2nd and 3rd class do not appear to have cabins assigned to them. The majority of the passengers (in the data set) were in 3rd class. This is based on the upper 50th percentile for pclass being 3 so the lower 50th percentile must include the 1st and 2nd class passengers combined. It would appear that the most interesting/relevant factors to look at are
Step3: There are no surprising strong correlations. There is a negative correlation between Survived and Pclass.
Step4: We see that there are almost twice as many male passengers as female passengers in the data set. Keep this in mind when we look at survival rates for males and females.
Step5: For this dataset, the majority of males were in 3rd class, whereas the females appear to be more evenly spread amongst the three classes. Although, there are still more females in 3rd class, than in 1st or 2nd class.
Step6: As noted earlier, some of the ages are missing - 177 records to be exact. We cannot ignore this many rows so we must come up with a way to impute the missing ages. One solution would be to fill in the missing ages with the median ages according to gender. This is not ideal, but it's better than excluding those rows outright.
Step7: Since the median age of all passengers, 28.0, is the median of the above two median ages, we will fill in the NaNs with 28.0.
Step8: I want to determine whether a passenger was an adult or a child. To this end, I add a new column to the data set indicating each passenger's status (at least for those passengers that have an age).
Step9: Let's see what the breakdown is for adults and children by class.
Step10: Exploration of Data by Family Affiliation
Step11: Let's breakdown family affiliation by class.
Step12: Looking at the above graph, we see that in 3rd class, more passengers traveled alone, whereas in 1st and 2nd class roughly the same number of passengers traveled with families as did those who traveled alone.
Step13: Clearly the majority of passengers perished in the data set. Next, we compare survival rates by gender.
Step14: There is a notable difference in survival rates between genders with a rate of roughly 74.2% for females and only 18.9% for males.
Step15: There are notable differences in survival rates between passenger classes with a survival rate of greater than 50% for first class passengers, less than 50% for second class passengers, and less than 25% for third class passengers.
Step16: We observe that females in a particular class had noticeably greater survival rates than males in the same class. In first class, the rate of survival for females was greater than 96% (but slightly under 32% for males). In second class, the rate of survival for females was greater than 92% (but slightly under 16% for males). In third class, the rate of survival for females was roughly 50% (but under 14% for males).
Step17: Those who traveled alone had a survival rate of roughly 30.4%, whereas those who traveled with families had a survival rate of roughly 50.6%. Let's break this down further by class.
Step18: We observe that passengers in families survived at higher rates than those who traveled alone, irrespective of class.
Step19: Let's compare survival rates by class.
Step20: Children in first and second classes had much higher survival rates than children in third class. Similarly women in first and second classes fared better than women in third class. Men in first class had a notably higher rate of survival than men in second and third classes.
|
4,345
|
<ASSISTANT_TASK:>
Python Code:
class Student:
    count = 0 # Total number of objects created so far; a class (static) variable since it is declared outside any method
def __init__(self,name,usn,marks):
Constructor of class Student
Input: name - name of the student : string
usn - university serial number : string
marks - marks in 3 subjects out of 20
Student.count += 1
self.name = name
self.usn = usn
        self.marks = marks[:] # Copy marks into self.marks; a plain self.marks = marks would only copy the reference
def print_details(self):
print(str(self))
def total_marks(self):
return sum(self.marks)
def __iter__(self):
details = {'name':self.name,'usn':self.usn,'marks':self.marks}
for k,v in details.items():
yield k,v # A tuple
def __str__(self):
return "Name : {0} \nUSN = {1}\nMarks in 3 subjects = {2}".format(self.name,self.usn,self.marks)
@staticmethod
def get_total_count():
return Student.count
s1 = Student('Ramesh','4jc11cs111',[20,16,18])
s2 = Student('Ravi','4jc15cs112',[15,18,18])
print(s1) # calls __str__()
print(s2)
Student.count
Student.get_total_count()
for k,v in s1:
print('{} = {}'.format(k,v))
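# Because __iter__ yields (key, value) tuples, the built-in dict() can consume
# a Student directly -- a handy side effect of this design:
print(dict(s1))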
s1.print_details() # self of Student.print_details(self) is passed as s1
Student.print_details(s1) # Explicitly passing self parameter
Student.get_total_count()
s1.get_total_count() # Also works; the @staticmethod decorator prevents the instance from being passed to the method
class Duck:
def quack(self):
print("Quaaaaaack!")
def feathers(self):
print("The duck has white and gray feathers.")
class Person:
def quack(self):
print("The person imitates a duck.")
def feathers(self):
print("The person takes a feather from the ground and shows it.")
def name(self):
print("John Smith")
def in_the_forest(duck):
duck.quack()
duck.feathers()
def game():
donald = Duck()
john = Person()
in_the_forest(donald)
in_the_forest(john)
game()
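# Duck typing cuts both ways: an object lacking the expected methods fails only
# at call time with an AttributeError. A small, hedged illustration:
class Stone:
    pass

try:
    in_the_forest(Stone())
except AttributeError as e:
    print("A stone is not duck-like enough:", e)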
x = 8
type(x)
type(8.5)
type('hello')
type([1,2,1])
type({})
type((1,))
type(s1)
import random
type(random)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Object Oriented Programming
Step2: Duck Typing and Interfaces
Step3: type() - Obtaining the data type of a variable
|
4,346
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import pandas as pd
import sys
sys.path.append("../../../bayesianpy")
import bayesianpy
from bayesianpy.network import Builder as builder
import logging
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
# Using the latent variable to cluster data points. Based upon the Iris dataset which has 3 distinct clusters
# (not all of which are linearly separable). Using a joint probability distribution, first based upon the class
# variable 'iris_class' and subsequently the cluster variable as a tail variable. Custom query currently only supports
# a single discrete tail variable and multiple continuous head variables.
jd = bayesianpy.visual.JointDistribution()
def plot(head_variables, results):
fig = plt.figure(figsize=(10, 10))
n = len(head_variables)-1
total = n*(n+1)/2
k = 1
for i, hv in enumerate(head_variables):
for j in range(i + 1, len(head_variables)):
            ax = fig.add_subplot(total//2, 2, k)
jd.plot_distribution_with_covariance(ax, iris,
(head_variables[i], head_variables[j]), results)
k+=1
plt.show()
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
bayesianpy.jni.attach(logger)
db_folder = bayesianpy.utils.get_path_to_parent_dir("")
iris = pd.read_csv(os.path.join(db_folder, "data/iris.csv"), index_col=False)
network = bayesianpy.network.create_network()
petal_length = builder.create_continuous_variable(network, "petal_length")
petal_width = builder.create_continuous_variable(network, "petal_width")
sepal_length = builder.create_continuous_variable(network, "sepal_length")
sepal_width = builder.create_continuous_variable(network, "sepal_width")
nodes = [petal_length, petal_width, sepal_length, sepal_width]
class_variable = builder.create_discrete_variable(network, iris, 'iris_class', iris['iris_class'].unique())
for i, node in enumerate(nodes):
builder.create_link(network, class_variable, node)
plt.figure()
layout = bayesianpy.visual.NetworkLayout(network)
graph = layout.build_graph()
pos = layout.fruchterman_reingold_layout(graph)
layout.visualise(graph, pos)
with bayesianpy.data.DataSet(iris, db_folder, logger) as dataset:
model = bayesianpy.model.NetworkModel(network, logger)
model.train(dataset)
with bayesianpy.data.DataSet(iris, db_folder, logger) as dataset:
head_variables = ['sepal_length','sepal_width','petal_length','petal_width']
query_type_class = bayesianpy.model.QueryConditionalJointProbability(
head_variables=head_variables,
tail_variables=['iris_class'])
engine = bayesianpy.model.InferenceEngine(network).create_engine()
# pass in an inference engine so that multiple queries can be performed, or evidence can be set.
query = bayesianpy.model.Query(network, engine, logger)
results_class = query.execute([query_type_class], aslist=False)
plot(head_variables, results_class)
network = bayesianpy.network.create_network()
petal_length = builder.create_continuous_variable(network, "petal_length")
petal_width = builder.create_continuous_variable(network, "petal_width")
sepal_length = builder.create_continuous_variable(network, "sepal_length")
sepal_width = builder.create_continuous_variable(network, "sepal_width")
nodes = [petal_length, petal_width, sepal_length, sepal_width]
class_variable = builder.create_discrete_variable(network, iris, 'iris_class', iris['iris_class'].unique())
for i, node in enumerate(nodes):
builder.create_link(network, class_variable, node)
for j in range(i+1, len(nodes)):
builder.create_link(network, node, nodes[j])
plt.figure()
layout = bayesianpy.visual.NetworkLayout(network)
graph = layout.build_graph()
pos = layout.fruchterman_reingold_layout(graph)
layout.visualise(graph, pos)
with bayesianpy.data.DataSet(iris, db_folder, logger) as dataset:
model = bayesianpy.model.NetworkModel(network, logger)
model.train(dataset)
head_variables = ['sepal_length','sepal_width','petal_length','petal_width']
query_type_class = bayesianpy.model.QueryConditionalJointProbability(
head_variables=head_variables,
tail_variables=['iris_class'])
engine = bayesianpy.model.InferenceEngine(network).create_engine()
# pass in an inference engine so that multiple queries can be performed, or evidence can be set.
query = bayesianpy.model.Query(network, engine, logger)
results_class = query.execute([query_type_class], aslist=False)
plot(head_variables, results_class)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, just a bit of setup code to load the data and set up the JPype instance.
Step2: Naive Bayes
Step3: The network still needs training, so kick that off.
Step4: Now it gets interesting, as we can query the priors that the network has set up. GaussianMixtureQuery returns the same format of covariance matrix that would be output by numpy.cov.
Step5: Performance doesn't seem too bad with the naive Bayes model; however, note that the ellipses have zero covariance, as each variable is independent of the others (apart from iris_class). To improve performance, the variables could be fully connected.
|
4,347
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
from mne.report import Report
from mne.datasets import sample
from mne import read_evokeds
from matplotlib import pyplot as plt
data_path = sample.data_path()
meg_path = data_path + '/MEG/sample'
subjects_dir = data_path + '/subjects'
evoked_fname = meg_path + '/sample_audvis-ave.fif'
report = Report(image_format='png', subjects_dir=subjects_dir,
info_fname=evoked_fname, subject='sample')
report.parse_folder(meg_path)
# Load the evoked data
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(None, 0), verbose=False)
evoked.crop(0, .2)
times = evoked.times[::4]
# Create a list of figs for the slider
figs = list()
for t in times:
figs.append(evoked.plot_topomap(t, vmin=-300, vmax=300, res=100,
show=False))
plt.close(figs[-1])
report.add_slider_to_section(figs, times, 'Evoked Response',
image_format='svg')
# to save report
# report.save('foobar.html', True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Do standard folder parsing (this can take a couple of minutes)
Step2: Add a custom section with an evoked slider
|
4,348
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
#Read files:
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
#Combine test and train into one file
train['source']='train'
test['source']='test'
data = pd.concat([train, test],ignore_index=True)
print train.shape, test.shape, data.shape
#Check missing values:
data.apply(lambda x: sum(x.isnull()))
#Numerical data summary:
data.describe()
#Number of unique values in each:
data.apply(lambda x: len(x.unique()))
#Filter categorical variables
categorical_columns = [x for x in data.dtypes.index if data.dtypes[x]=='object']
#Exclude ID cols and source:
categorical_columns = [x for x in categorical_columns if x not in ['Item_Identifier','Outlet_Identifier','source']]
#Print frequency of categories
for col in categorical_columns:
    print '\nFrequency of Categories for variable %s'%col
print data[col].value_counts()
#Determine the average weight per item:
item_avg_weight = data.pivot_table(values='Item_Weight', index='Item_Identifier')
#Get a boolean variable specifying missing Item_Weight values
miss_bool = data['Item_Weight'].isnull()
#Impute data and check #missing values before and after imputation to confirm
print 'Original #missing: %d'% sum(miss_bool)
data.loc[miss_bool,'Item_Weight'] = data.loc[miss_bool,'Item_Identifier'].apply(lambda x: item_avg_weight[x])
print 'Final #missing: %d'% sum(data['Item_Weight'].isnull())
#Import mode function:
from scipy.stats import mode
#Determing the mode for each
outlet_size_mode = data.pivot_table(values='Outlet_Size', columns='Outlet_Type',aggfunc=(lambda x:mode(x).mode[0]) )
print 'Mode for each Outlet_Type:'
print outlet_size_mode
#Get a boolean variable specifying missing Item_Weight values
miss_bool = data['Outlet_Size'].isnull()
#Impute data and check #missing values before and after imputation to confirm
print '\nOriginal #missing: %d'% sum(miss_bool)
data.loc[miss_bool,'Outlet_Size'] = data.loc[miss_bool,'Outlet_Type'].apply(lambda x: outlet_size_mode[x])
print sum(data['Outlet_Size'].isnull())
#Check the mean sales by type:
data.pivot_table(values='Item_Outlet_Sales',index='Outlet_Type')
#Determine average visibility of a product
visibility_avg = data.pivot_table(values='Item_Visibility', index='Item_Identifier')
#Impute 0 values with mean visibility of that product:
miss_bool = (data['Item_Visibility'] == 0)
print 'Number of 0 values initially: %d'%sum(miss_bool)
data.loc[miss_bool,'Item_Visibility'] = data.loc[miss_bool,'Item_Identifier'].apply(lambda x: visibility_avg[x])
print 'Number of 0 values after modification: %d'%sum(data['Item_Visibility'] == 0)
#Determine another variable with means ratio
data['Item_Visibility_MeanRatio'] = data.apply(lambda x: x['Item_Visibility']/visibility_avg[x['Item_Identifier']], axis=1)
print data['Item_Visibility_MeanRatio'].describe()
#Item type combine:
data['Item_Identifier'].value_counts()
data['Item_Type_Combined'] = data['Item_Identifier'].apply(lambda x: x[0:2])
data['Item_Type_Combined'] = data['Item_Type_Combined'].map({'FD':'Food',
'NC':'Non-Consumable',
'DR':'Drinks'})
data['Item_Type_Combined'].value_counts()
#Years:
data['Outlet_Years'] = 2013 - data['Outlet_Establishment_Year']
data['Outlet_Years'].describe()
#Change categories of low fat:
print 'Original Categories:'
print data['Item_Fat_Content'].value_counts()
print '\nModified Categories:'
data['Item_Fat_Content'] = data['Item_Fat_Content'].replace({'LF':'Low Fat',
'reg':'Regular',
'low fat':'Low Fat'})
print data['Item_Fat_Content'].value_counts()
#Mark non-consumables as separate category in low_fat:
data.loc[data['Item_Type_Combined']=="Non-Consumable",'Item_Fat_Content'] = "Non-Edible"
data['Item_Fat_Content'].value_counts()
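#Hedged cross-check: after the relabeling, every 'Non-Consumable' item should
#fall under 'Non-Edible' and no other fat-content category
print pd.crosstab(data['Item_Type_Combined'], data['Item_Fat_Content'])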
#Import library:
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
#New variable for outlet
data['Outlet'] = le.fit_transform(data['Outlet_Identifier'])
var_mod = ['Item_Fat_Content','Outlet_Location_Type','Outlet_Size','Item_Type_Combined','Outlet_Type','Outlet']
le = LabelEncoder()
for i in var_mod:
data[i] = le.fit_transform(data[i])
#One Hot Coding:
data = pd.get_dummies(data, columns=['Item_Fat_Content','Outlet_Location_Type','Outlet_Size','Outlet_Type',
'Item_Type_Combined','Outlet'])
data.dtypes
data[['Item_Fat_Content_0','Item_Fat_Content_1','Item_Fat_Content_2']].head(10)
#Drop the columns which have been converted to different types:
data.drop(['Item_Type','Outlet_Establishment_Year'],axis=1,inplace=True)
#Divide into test and train:
train = data.loc[data['source']=="train"]
test = data.loc[data['source']=="test"]
#Drop unnecessary columns:
test.drop(['Item_Outlet_Sales','source'],axis=1,inplace=True)
train.drop(['source'],axis=1,inplace=True)
#Export files as modified versions:
train.to_csv("train_modified.csv",index=False)
test.to_csv("test_modified.csv",index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading data
Step2: 2. Data Cleaning
Step3: 3. Feature Engineering
Step4: Step2
Step5: Step 3
Step6: Step 4
Step7: Step 5
Step8: Step 6
Step9: Step7
|
4,349
|
<ASSISTANT_TASK:>
Python Code:
import time
import random  # needed for random.sample in the validation cell below
import numpy as np
import tensorflow as tf
import utils
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(dataset_filename):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
urlretrieve(
'http://mattmahoney.net/dc/text8.zip',
dataset_filename,
pbar.hook)
if not isdir(dataset_folder_path):
with zipfile.ZipFile(dataset_filename) as zip_ref:
zip_ref.extractall(dataset_folder_path)
with open('data/text8') as f:
text = f.read()
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
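# For reference, a minimal sketch of what utils.create_lookup_tables likely does
# (an assumption -- the authoritative version ships in the course's utils.py):
from collections import Counter

def create_lookup_tables_sketch(words):
    counts = Counter(words)
    sorted_vocab = sorted(counts, key=counts.get, reverse=True)  # most frequent first
    int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)}
    vocab_to_int = {word: ii for ii, word in int_to_vocab.items()}
    return vocab_to_int, int_to_vocab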
## Your code here -- one reasonable completion is sketched just below
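# One reasonable completion (a sketch, assuming the Mikolov et al. subsampling
# heuristic with threshold t = 1e-5; the variable names are our own choices):
from collections import Counter

threshold = 1e-5
word_counts = Counter(int_words)
total_count = len(int_words)
freqs = {word: count / total_count for word, count in word_counts.items()}
p_drop = {word: 1 - np.sqrt(threshold / freqs[word]) for word in word_counts}
train_words = [word for word in int_words if random.random() < (1 - p_drop[word])]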
def get_target(words, idx, window_size=5):
    ''' Get a list of words in a window around an index. '''
    # One reasonable completion: sample a window radius R in [1, window_size],
    # so nearer words are used as targets more often (as in the skip-gram paper).
    R = np.random.randint(1, window_size + 1)
    start = max(idx - R, 0)
    targets = set(words[start:idx] + words[idx + 1:idx + R + 1])
    return list(targets)
def get_batches(words, batch_size, window_size=5):
''' Create a generator of word batches as a tuple (inputs, targets) '''
n_batches = len(words)//batch_size
# only full batches
words = words[:n_batches*batch_size]
for idx in range(0, len(words), batch_size):
x, y = [], []
batch = words[idx:idx+batch_size]
for ii in range(len(batch)):
batch_x = batch[ii]
batch_y = get_target(batch, ii, window_size)
y.extend(batch_y)
x.extend([batch_x]*len(batch_y))
yield x, y
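# Quick smoke test of the generator (hedged -- any small sizes work here):
example_x, example_y = next(get_batches(train_words, batch_size=4, window_size=2))
print(example_x, example_y)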
train_graph = tf.Graph()
with train_graph.as_default():
    # One reasonable completion: integer placeholders for input and target word IDs.
    inputs = tf.placeholder(tf.int32, [None], name='inputs')
    labels = tf.placeholder(tf.int32, [None, None], name='labels')
n_vocab = len(int_to_vocab)
n_embedding = 200 # Number of embedding features (200 is one common choice)
with train_graph.as_default():
    embedding = tf.Variable(tf.random_uniform((n_vocab, n_embedding), -1, 1)) # embedding weight matrix
    embed = tf.nn.embedding_lookup(embedding, inputs) # hidden layer output
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
    softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1)) # softmax weight matrix
    softmax_b = tf.Variable(tf.zeros(n_vocab)) # softmax biases

    # Calculate the loss using negative sampling (one reasonable completion)
    loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b, labels, embed,
                                      n_sampled, n_vocab)
cost = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer().minimize(cost)
with train_graph.as_default():
## From Thushan Ganegedara's implementation
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100
# pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,
random.sample(range(1000,1000+valid_window), valid_size//2))
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist:
!mkdir checkpoints
epochs = 10
batch_size = 1000
window_size = 10
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
for e in range(1, epochs+1):
batches = get_batches(train_words, batch_size, window_size)
start = time.time()
for x, y in batches:
feed = {inputs: x,
labels: np.array(y)[:, None]}
train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
loss += train_loss
if iteration % 100 == 0:
end = time.time()
print("Epoch {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Avg. Training loss: {:.4f}".format(loss/100),
"{:.4f} sec/batch".format((end-start)/100))
loss = 0
start = time.time()
if iteration % 1000 == 0:
## From Thushan Ganegedara's implementation
# note that this is expensive (~20% slowdown if computed every 500 steps)
sim = similarity.eval()
for i in range(valid_size):
valid_word = int_to_vocab[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = int_to_vocab[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
iteration += 1
save_path = saver.save(sess, "checkpoints/text8.ckpt")
embed_mat = sess.run(normalized_embedding)
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
embed_mat = sess.run(embedding)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
Step2: Preprocessing
Step3: And here I'm creating dictionaries to convert words to integers and back, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.
Step4: Subsampling
Step5: Making batches
Step6: Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function, by the way, which helps save memory.
Step7: Building the graph
Step8: Embedding
Step9: Negative sampling
Step10: Validation
Step11: Training
Step12: Restore the trained network if you need to
Step13: Visualizing the word vectors
|
4,350
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@test {"skip": true}
!pip install tensorflow-lattice seaborn
import tensorflow as tf
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import sys
import tensorflow_lattice as tfl
logging.disable(sys.maxsize)
# List of learning rate hyperparameters to try.
# For a longer list of reasonable hyperparameters, try [0.001, 0.01, 0.1].
LEARNING_RATES = [0.01]
# Default number of training epochs and batch sizes.
NUM_EPOCHS = 1000
BATCH_SIZE = 1000
# Directory containing dataset files.
DATA_DIR = 'https://raw.githubusercontent.com/serenalwang/shape_constraints_for_ethics/master'
# Load data file.
law_file_name = 'lsac.csv'
law_file_path = os.path.join(DATA_DIR, law_file_name)
raw_law_df = pd.read_csv(law_file_path, delimiter=',')
# Define label column name.
LAW_LABEL = 'pass_bar'
def preprocess_law_data(input_df):
# Drop rows with where the label or features of interest are missing.
output_df = input_df[~input_df[LAW_LABEL].isna() & ~input_df['ugpa'].isna() &
(input_df['ugpa'] > 0) & ~input_df['lsat'].isna()]
return output_df
law_df = preprocess_law_data(raw_law_df)
def split_dataset(input_df, random_state=888):
  """Splits an input dataset into train, val, and test sets."""
train_df, test_val_df = train_test_split(
input_df, test_size=0.3, random_state=random_state)
val_df, test_df = train_test_split(
test_val_df, test_size=0.66, random_state=random_state)
return train_df, val_df, test_df
law_train_df, law_val_df, law_test_df = split_dataset(law_df)
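# Sanity check on the split sizes (a sketch; exact counts depend on the CSV):
print('train/val/test sizes:', len(law_train_df), len(law_val_df), len(law_test_df))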
def plot_dataset_contour(input_df, title):
plt.rcParams['font.family'] = ['serif']
g = sns.jointplot(
x='ugpa',
y='lsat',
data=input_df,
kind='kde',
xlim=[1.4, 4],
ylim=[0, 50])
g.plot_joint(plt.scatter, c='b', s=10, linewidth=1, marker='+')
g.ax_joint.collections[0].set_alpha(0)
g.set_axis_labels('Undergraduate GPA', 'LSAT score', fontsize=14)
g.fig.suptitle(title, fontsize=14)
  # Adjust plot so that the title fits.
plt.subplots_adjust(top=0.9)
plt.show()
law_df_pos = law_df[law_df[LAW_LABEL] == 1]
plot_dataset_contour(
law_df_pos, title='Distribution of students that passed the bar')
law_df_neg = law_df[law_df[LAW_LABEL] == 0]
plot_dataset_contour(
law_df_neg, title='Distribution of students that failed the bar')
def train_tfl_estimator(train_df, monotonicity, learning_rate, num_epochs,
batch_size, get_input_fn,
get_feature_columns_and_configs):
  """Trains a TFL calibrated linear estimator.

  Args:
    train_df: pandas dataframe containing training data.
    monotonicity: if 0, then no monotonicity constraints. If 1, then all
      features are constrained to be monotonically increasing.
    learning_rate: learning rate of Adam optimizer for gradient descent.
    num_epochs: number of training epochs.
    batch_size: batch size for each epoch. None means the batch size is the
      full dataset size.
    get_input_fn: function that returns the input_fn for a TF estimator.
    get_feature_columns_and_configs: function that returns TFL feature columns
      and configs.

  Returns:
    estimator: a trained TFL calibrated linear estimator.
  """
feature_columns, feature_configs = get_feature_columns_and_configs(
monotonicity)
model_config = tfl.configs.CalibratedLinearConfig(
feature_configs=feature_configs, use_bias=False)
estimator = tfl.estimators.CannedClassifier(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=get_input_fn(input_df=train_df, num_epochs=1),
optimizer=tf.keras.optimizers.Adam(learning_rate))
estimator.train(
input_fn=get_input_fn(
input_df=train_df, num_epochs=num_epochs, batch_size=batch_size))
return estimator
def optimize_learning_rates(
train_df,
val_df,
test_df,
monotonicity,
learning_rates,
num_epochs,
batch_size,
get_input_fn,
get_feature_columns_and_configs,
):
  """Optimizes learning rates for TFL estimators.

  Args:
    train_df: pandas dataframe containing training data.
    val_df: pandas dataframe containing validation data.
    test_df: pandas dataframe containing test data.
    monotonicity: if 0, then no monotonicity constraints. If 1, then all
      features are constrained to be monotonically increasing.
    learning_rates: list of learning rates to try.
    num_epochs: number of training epochs.
    batch_size: batch size for each epoch. None means the batch size is the
      full dataset size.
    get_input_fn: function that returns the input_fn for a TF estimator.
    get_feature_columns_and_configs: function that returns TFL feature columns
      and configs.

  Returns:
    A single TFL estimator that achieved the best validation accuracy.
  """
estimators = []
train_accuracies = []
val_accuracies = []
test_accuracies = []
for lr in learning_rates:
estimator = train_tfl_estimator(
train_df=train_df,
monotonicity=monotonicity,
learning_rate=lr,
num_epochs=num_epochs,
batch_size=batch_size,
get_input_fn=get_input_fn,
get_feature_columns_and_configs=get_feature_columns_and_configs)
estimators.append(estimator)
train_acc = estimator.evaluate(
input_fn=get_input_fn(train_df, num_epochs=1))['accuracy']
val_acc = estimator.evaluate(
input_fn=get_input_fn(val_df, num_epochs=1))['accuracy']
test_acc = estimator.evaluate(
input_fn=get_input_fn(test_df, num_epochs=1))['accuracy']
print('accuracies for learning rate %f: train: %f, val: %f, test: %f' %
(lr, train_acc, val_acc, test_acc))
train_accuracies.append(train_acc)
val_accuracies.append(val_acc)
test_accuracies.append(test_acc)
max_index = val_accuracies.index(max(val_accuracies))
return estimators[max_index]
def get_input_fn_law(input_df, num_epochs, batch_size=None):
  """Gets TF input_fn for law school models."""
return tf.compat.v1.estimator.inputs.pandas_input_fn(
x=input_df[['ugpa', 'lsat']],
y=input_df['pass_bar'],
num_epochs=num_epochs,
batch_size=batch_size or len(input_df),
shuffle=False)
def get_feature_columns_and_configs_law(monotonicity):
  """Gets TFL feature configs for law school models."""
feature_columns = [
tf.feature_column.numeric_column('ugpa'),
tf.feature_column.numeric_column('lsat'),
]
feature_configs = [
tfl.configs.FeatureConfig(
name='ugpa',
lattice_size=2,
pwl_calibration_num_keypoints=20,
monotonicity=monotonicity,
pwl_calibration_always_monotonic=False),
tfl.configs.FeatureConfig(
name='lsat',
lattice_size=2,
pwl_calibration_num_keypoints=20,
monotonicity=monotonicity,
pwl_calibration_always_monotonic=False),
]
return feature_columns, feature_configs
def get_predicted_probabilities(estimator, input_df, get_input_fn):
predictions = estimator.predict(
input_fn=get_input_fn(input_df=input_df, num_epochs=1))
return [prediction['probabilities'][1] for prediction in predictions]
def plot_model_contour(estimator, input_df, num_keypoints=20):
x = np.linspace(min(input_df['ugpa']), max(input_df['ugpa']), num_keypoints)
y = np.linspace(min(input_df['lsat']), max(input_df['lsat']), num_keypoints)
x_grid, y_grid = np.meshgrid(x, y)
positions = np.vstack([x_grid.ravel(), y_grid.ravel()])
plot_df = pd.DataFrame(positions.T, columns=['ugpa', 'lsat'])
plot_df[LAW_LABEL] = np.ones(len(plot_df))
predictions = get_predicted_probabilities(
estimator=estimator, input_df=plot_df, get_input_fn=get_input_fn_law)
grid_predictions = np.reshape(predictions, x_grid.shape)
plt.rcParams['font.family'] = ['serif']
plt.contour(
x_grid,
y_grid,
grid_predictions,
colors=('k',),
levels=np.linspace(0, 1, 11))
plt.contourf(
x_grid,
y_grid,
grid_predictions,
cmap=plt.cm.bone,
      levels=np.linspace(0, 1, 11))
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
cbar = plt.colorbar()
cbar.ax.set_ylabel('Model score', fontsize=20)
cbar.ax.tick_params(labelsize=20)
plt.xlabel('Undergraduate GPA', fontsize=20)
plt.ylabel('LSAT score', fontsize=20)
nomon_linear_estimator = optimize_learning_rates(
train_df=law_train_df,
val_df=law_val_df,
test_df=law_test_df,
monotonicity=0,
learning_rates=LEARNING_RATES,
batch_size=BATCH_SIZE,
num_epochs=NUM_EPOCHS,
get_input_fn=get_input_fn_law,
get_feature_columns_and_configs=get_feature_columns_and_configs_law)
plot_model_contour(nomon_linear_estimator, input_df=law_df)
mon_linear_estimator = optimize_learning_rates(
train_df=law_train_df,
val_df=law_val_df,
test_df=law_test_df,
monotonicity=1,
learning_rates=LEARNING_RATES,
batch_size=BATCH_SIZE,
num_epochs=NUM_EPOCHS,
get_input_fn=get_input_fn_law,
get_feature_columns_and_configs=get_feature_columns_and_configs_law)
plot_model_contour(mon_linear_estimator, input_df=law_df)
feature_names = ['ugpa', 'lsat']
dnn_estimator = tf.estimator.DNNClassifier(
feature_columns=[
tf.feature_column.numeric_column(feature) for feature in feature_names
],
hidden_units=[100, 100],
optimizer=tf.keras.optimizers.Adam(learning_rate=0.008),
activation_fn=tf.nn.relu)
dnn_estimator.train(
input_fn=get_input_fn_law(
law_train_df, batch_size=BATCH_SIZE, num_epochs=NUM_EPOCHS))
dnn_train_acc = dnn_estimator.evaluate(
input_fn=get_input_fn_law(law_train_df, num_epochs=1))['accuracy']
dnn_val_acc = dnn_estimator.evaluate(
input_fn=get_input_fn_law(law_val_df, num_epochs=1))['accuracy']
dnn_test_acc = dnn_estimator.evaluate(
input_fn=get_input_fn_law(law_test_df, num_epochs=1))['accuracy']
print('accuracies for DNN: train: %f, val: %f, test: %f' %
(dnn_train_acc, dnn_val_acc, dnn_test_acc))
plot_model_contour(dnn_estimator, input_df=law_df)
tree_estimator = tf.estimator.BoostedTreesClassifier(
feature_columns=[
tf.feature_column.numeric_column(feature) for feature in feature_names
],
n_batches_per_layer=2,
n_trees=20,
max_depth=4)
tree_estimator.train(
input_fn=get_input_fn_law(
law_train_df, num_epochs=NUM_EPOCHS, batch_size=BATCH_SIZE))
tree_train_acc = tree_estimator.evaluate(
input_fn=get_input_fn_law(law_train_df, num_epochs=1))['accuracy']
tree_val_acc = tree_estimator.evaluate(
input_fn=get_input_fn_law(law_val_df, num_epochs=1))['accuracy']
tree_test_acc = tree_estimator.evaluate(
input_fn=get_input_fn_law(law_test_df, num_epochs=1))['accuracy']
print('accuracies for GBT: train: %f, val: %f, test: %f' %
(tree_train_acc, tree_val_acc, tree_test_acc))
plot_model_contour(tree_estimator, input_df=law_df)
# Load data file.
credit_file_name = 'credit_default.csv'
credit_file_path = os.path.join(DATA_DIR, credit_file_name)
credit_df = pd.read_csv(credit_file_path, delimiter=',')
# Define label column name.
CREDIT_LABEL = 'default'
credit_train_df, credit_val_df, credit_test_df = split_dataset(credit_df)
def get_agg_data(df, x_col, y_col, bins=11):
xbins = pd.cut(df[x_col], bins=bins)
data = df[[x_col, y_col]].groupby(xbins).agg(['mean', 'sem'])
return data
def plot_2d_means_credit(input_df, x_col, y_col, x_label, y_label):
plt.rcParams['font.family'] = ['serif']
_, ax = plt.subplots(nrows=1, ncols=1)
plt.setp(ax.spines.values(), color='black', linewidth=1)
ax.tick_params(
direction='in', length=6, width=1, top=False, right=False, labelsize=18)
df_single = get_agg_data(input_df[input_df['MARRIAGE'] == 1], x_col, y_col)
df_married = get_agg_data(input_df[input_df['MARRIAGE'] == 2], x_col, y_col)
ax.errorbar(
df_single[(x_col, 'mean')],
df_single[(y_col, 'mean')],
xerr=df_single[(x_col, 'sem')],
yerr=df_single[(y_col, 'sem')],
color='orange',
marker='s',
capsize=3,
capthick=1,
label='Single',
markersize=10,
linestyle='')
ax.errorbar(
df_married[(x_col, 'mean')],
df_married[(y_col, 'mean')],
xerr=df_married[(x_col, 'sem')],
yerr=df_married[(y_col, 'sem')],
color='b',
marker='^',
capsize=3,
capthick=1,
label='Married',
markersize=10,
linestyle='')
leg = ax.legend(loc='upper left', fontsize=18, frameon=True, numpoints=1)
ax.set_xlabel(x_label, fontsize=18)
ax.set_ylabel(y_label, fontsize=18)
ax.set_ylim(0, 1.1)
ax.set_xlim(-2, 8.5)
ax.patch.set_facecolor('white')
leg.get_frame().set_edgecolor('black')
leg.get_frame().set_facecolor('white')
leg.get_frame().set_linewidth(1)
plt.show()
plot_2d_means_credit(credit_train_df, 'PAY_0', 'default',
'Repayment Status (April)', 'Observed default rate')
def get_input_fn_credit(input_df, num_epochs, batch_size=None):
  """Gets TF input_fn for credit default models."""
return tf.compat.v1.estimator.inputs.pandas_input_fn(
x=input_df[['MARRIAGE', 'PAY_0']],
y=input_df['default'],
num_epochs=num_epochs,
batch_size=batch_size or len(input_df),
shuffle=False)
def get_feature_columns_and_configs_credit(monotonicity):
  """Gets TFL feature configs for credit default models."""
feature_columns = [
tf.feature_column.numeric_column('MARRIAGE'),
tf.feature_column.numeric_column('PAY_0'),
]
feature_configs = [
tfl.configs.FeatureConfig(
name='MARRIAGE',
lattice_size=2,
pwl_calibration_num_keypoints=3,
monotonicity=monotonicity,
pwl_calibration_always_monotonic=False),
tfl.configs.FeatureConfig(
name='PAY_0',
lattice_size=2,
pwl_calibration_num_keypoints=10,
monotonicity=monotonicity,
pwl_calibration_always_monotonic=False),
]
return feature_columns, feature_configs
def plot_predictions_credit(input_df,
estimator,
x_col,
x_label='Repayment Status (April)',
y_label='Predicted default probability'):
predictions = get_predicted_probabilities(
estimator=estimator, input_df=input_df, get_input_fn=get_input_fn_credit)
new_df = input_df.copy()
new_df.loc[:, 'predictions'] = predictions
plot_2d_means_credit(new_df, x_col, 'predictions', x_label, y_label)
nomon_linear_estimator = optimize_learning_rates(
train_df=credit_train_df,
val_df=credit_val_df,
test_df=credit_test_df,
monotonicity=0,
learning_rates=LEARNING_RATES,
batch_size=BATCH_SIZE,
num_epochs=NUM_EPOCHS,
get_input_fn=get_input_fn_credit,
get_feature_columns_and_configs=get_feature_columns_and_configs_credit)
plot_predictions_credit(credit_train_df, nomon_linear_estimator, 'PAY_0')
mon_linear_estimator = optimize_learning_rates(
train_df=credit_train_df,
val_df=credit_val_df,
test_df=credit_test_df,
monotonicity=1,
learning_rates=LEARNING_RATES,
batch_size=BATCH_SIZE,
num_epochs=NUM_EPOCHS,
get_input_fn=get_input_fn_credit,
get_feature_columns_and_configs=get_feature_columns_and_configs_credit)
plot_predictions_credit(credit_train_df, mon_linear_estimator, 'PAY_0')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Shape constraints for ethics with TensorFlow Lattice
Step2: Import the required packages.
Step3: These are the default values used in this tutorial.
Step4: Case study 1
Step5: Preprocess the dataset.
Step7: Split the data into train/validation/test sets
Step8: Visualize the distribution of the data
Step11: Train calibrated linear models to predict bar exam passage
Step14: Helper functions for configuring the law school dataset features
Step15: Helper functions for visualizing the trained model's outputs
Step16: Train an unconstrained (non-monotonic) calibrated linear model
Step17: Train a monotonic calibrated linear model
Step18: Train other unconstrained models
Step19: Train an unconstrained gradient boosted tree (GBT) model
Step20: Case study 2
Step21: Split the data into train/validation/test sets
Step22: Visualize the distribution of the data
Step25: Train calibrated linear models to predict credit default rates
Step26: Helper functions for visualizing the trained model's outputs
Step27: Train an unconstrained (non-monotonic) calibrated linear model
Step28: Train a monotonic calibrated linear model
|
4,351
|
<ASSISTANT_TASK:>
Python Code:
# import data from url
from py2cytoscape.data.cyrest_client import CyRestClient
from IPython.display import Image
# Create REST client for Cytoscape
cy = CyRestClient()
# Reset current session for fresh start
cy.session.delete()
# Load a sample network
network = cy.network.create_from('http://chianti.ucsd.edu/~kono/data/galFiltered.sif')
# Apply layout to the cytoscape network object
cy.layout.apply(network = network)
# Show it!!
network_png = network.get_png()
Image(network_png)
# convert cytoscape network object to igraph object
import igraph as ig
import py2cytoscape.util.util_igraph as util_ig
# convert cytoscape object to igraph object
g = util_ig.to_igraph(network.to_json())
density = g.density()
print(density)
transitivity = g.transitivity_undirected()
print(transitivity)
# If you want to use this method, you have to use the non-multiple-edges graph object.
#community_fastgreedy = g.community_fastgreedy()
#print(community_fastgreedy)
closeness = g.closeness()
# Show 10 results of node closeness
print(closeness[0:9])
indegree = g.indegree()
outdegree = g.outdegree()
# Show 10 results of node degree
print(indegree[0:9])
print(outdegree[0:9])
pagerank = g.pagerank()
# Show 10 results of node degree
print(pagerank[0:9])
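# A hedged extra: the five highest-PageRank nodes by name (assumes the igraph
# vertices carry a 'name' attribute, as py2cytoscape's converter typically sets).
top5 = sorted(zip(g.vs['name'], pagerank), key=lambda t: -t[1])[:5]
print(top5)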
# If you want to use this method, you have to use the non-multiple-edges graph object.
#community_fastgreedy = g.community_fastgreedy()
#print(community_fastgreedy)
edge_betweenness = g.edge_betweenness()
print(edge_betweenness[0:9])
community_edge_betweenness_detection = g.community_edge_betweenness()
print(community_edge_betweenness_detection)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Global Network Analysis
Step2: Density
Step3: Transitivity
Step4: community detection
Step5: Node Analysis
Step6: Degree
Step7: PageRank
Step8: community detection
Step9: Edge Analysis
Step10: community detection
|
4,352
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pylab as plt
import convis
inp, out = convis.samples.generate_sample_data(input='random',size=(2000,20,20))
print(inp.shape)
m = convis.models.LN(kernel_dim=(12,7,7),population=True)
print(m.compute_loss(inp,out,dt=500))
m.set_optimizer.Adam()
for i in range(50):
m.optimize(inp,out,dt=500)
print(m.compute_loss(inp,out,dt=500))
convis.plot_5d_matshow(m.p.conv.weight)
plt.title('Filter after fitting 50x Adam')
convis.plot_5d_matshow(convis.samples._g.conv.weight)
plt.title('Random Ground Truth Filter')
m.set_optimizer.LBFGS()
losses = m.optimize(inp,out,dt=500)
print('losses during fitting:')
print(losses)
print(m.compute_loss(inp,out,dt=500))
convis.plot_5d_matshow(m.p.conv.weight)
plt.title('Filter after fitting with LBFGS')
m.plot_impulse_space()
convis.samples._g.plot_impulse_space()
m.save_parameters('fitted_model.npz')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then, you need to choose a model as well, e.g. an LN model.
Step2: We can have a look at how well our model is doing right now by looking
Step3: The default loss function is the sum of squared errors, but any loss
Step4: Then, we can optimize the model using our complete input,
Step5: We can examine the loss again after a few steps of fitting and also visualize the filters.
Step6: The loss decreased, but not by much.
Step7: Let's use a different algorithm to optimize our parameters.
Step8: Did we improve?
Step9: We can see clearly now that the size of our fitted kernel does not match the sample generator, which makes sense, since we normally don't have access to a ground-truth at all. But our model just set the border pixels and the first two frames to 0.
Step10: Finally, you can save the parameters into a
|
4,353
|
<ASSISTANT_TASK:>
Python Code:
# Install additional packages
!pip install -q matplotlib-venn
# Import all necessary libs
import json
import sys
import argparse
import pprint
import random
import datetime
import pandas as pd
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient import discovery
from oauthlib.oauth2.rfc6749.errors import InvalidGrantError
from google.auth.transport.requests import AuthorizedSession
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from googleapiclient.errors import HttpError
from matplotlib import pyplot as plt
from matplotlib_venn import venn3, venn3_circles
from IPython.display import display, HTML
from googleapiclient.errors import HttpError
from google.colab import auth
auth.authenticate_user()
print('Authenticated')
# TODO: Update the value of these variables with your own values
DEVELOPER_KEY = 'INSERT_DEVELOPER_KEY_HERE' # Replace with your own developer key
CLIENT_SECRETS_FILE = 'adh-key.json' # Rename your client secrets file to this name and upload it to this colab
# Other configuration variables
_APPLICATION_NAME = 'ADH Campaign Overlap'
_CREDENTIALS_FILE = 'fcq-credentials.json'
_SCOPES = 'https://www.googleapis.com/auth/adsdatahub'
_DISCOVERY_URL_TEMPLATE = 'https://%s/$discovery/rest?version=%s&key=%s'
_FCQ_DISCOVERY_FILE = 'fcq-discovery.json'
_FCQ_SERVICE = 'adsdatahub.googleapis.com'
_FCQ_VERSION = 'v1'
_REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
_SCOPE = ['https://www.googleapis.com/auth/adsdatahub']
_TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
MAX_PAGE_SIZE = 50
#!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def _GetCredentialsFromInstalledApplicationFlow():
"""Get new credentials using the installed application flow."""
flow = InstalledAppFlow.from_client_secrets_file(
CLIENT_SECRETS_FILE, scopes=_SCOPE)
flow.redirect_uri = _REDIRECT_URI # Set the redirect URI used for the flow.
auth_url, _ = flow.authorization_url(prompt='consent')
print ('Log into the Google Account you use to access the Full Circle Query '
'v2 API and go to the following URL:\n%s\n' % auth_url)
print('After approving the token, enter the verification code (if specified).')
code = input('Code: ')
try:
flow.fetch_token(code=code)
except InvalidGrantError as ex:
print('Authentication has failed: %s' % ex)
sys.exit(1)
credentials = flow.credentials
_SaveCredentials(credentials)
return credentials
def _LoadCredentials():
"""Loads and instantiates Credentials from JSON credentials file."""
with open(_CREDENTIALS_FILE, 'rb') as handler:
stored_creds = json.loads(handler.read())
creds = Credentials(client_id=stored_creds['client_id'],
client_secret=stored_creds['client_secret'],
token=None,
refresh_token=stored_creds['refresh_token'],
token_uri=_TOKEN_URI)
return creds
def _SaveCredentials(creds):
"""Save credentials to JSON file."""
stored_creds = {
'client_id': getattr(creds, '_client_id'),
'client_secret': getattr(creds, '_client_secret'),
'refresh_token': getattr(creds, '_refresh_token')
}
with open(_CREDENTIALS_FILE, 'w') as handler:
handler.write(json.dumps(stored_creds))
def GetCredentials():
"""Get stored credentials if they exist, otherwise return new credentials.
If no stored credentials are found, new credentials will be produced by
stepping through the Installed Application OAuth 2.0 flow with the specified
client secrets file. The credentials will then be saved for future use.
Returns:
A configured google.oauth2.credentials.Credentials instance.
"""
try:
creds = _LoadCredentials()
creds.refresh(Request())
except IOError:
creds = _GetCredentialsFromInstalledApplicationFlow()
return creds
def GetDiscoveryDocument():
"""Downloads the Full Circle Query v2 discovery document.
Downloads the Full Circle Query v2 discovery document to fcq-discovery.json
if it is accessible. If the file already exists, it will be overwritten.
Raises:
ValueError: raised if the discovery document is inaccessible for any reason.
"""
credentials = GetCredentials()
discovery_url = _DISCOVERY_URL_TEMPLATE % (
_FCQ_SERVICE, _FCQ_VERSION, DEVELOPER_KEY)
auth_session = AuthorizedSession(credentials)
discovery_response = auth_session.get(discovery_url)
if discovery_response.status_code == 200:
with open(_FCQ_DISCOVERY_FILE, 'w') as handler:
handler.write(discovery_response.text)
else:
raise ValueError('Unable to retrieve discovery document for api name "%s" '
'and version "%s" via discovery URL: %s'
% (_FCQ_SERVICE, _FCQ_VERSION, discovery_url))
def GetService():
"""Builds a configured Full Circle Query v2 API service.
Returns:
A googleapiclient.discovery.Resource instance configured for the Full Circle
Query v2 service.
"""
credentials = GetCredentials()
discovery_url = _DISCOVERY_URL_TEMPLATE % (
_FCQ_SERVICE, _FCQ_VERSION, DEVELOPER_KEY)
service = discovery.build(
'adsdatahub', _FCQ_VERSION, credentials=credentials,
discoveryServiceUrl=discovery_url)
return service
def GetServiceFromDiscoveryDocument():
"""Builds a configured Full Circle Query v2 API service via discovery file.
Returns:
A googleapiclient.discovery.Resource instance configured for the Full Circle
Query API v2 service.
"""
credentials = GetCredentials()
with open(_FCQ_DISCOVERY_FILE, 'rb') as handler:
discovery_doc = handler.read()
service = discovery.build_from_document(
service=discovery_doc, credentials=credentials)
return service
try:
full_circle_query = GetService()
except IOError as ex:
print ('Unable to create ads data hub service - %s' % ex)
print ('Did you specify the client secrets file in samples_util.py?')
sys.exit(1)
try:
# Execute the request.
response = full_circle_query.customers().list().execute()
except HttpError as e:
print (e)
sys.exit(1)
if 'customers' in response:
print ('ADH API Returned {} Ads Data Hub customers for the current user!'.format(len(response['customers'])))
for customer in response['customers']:
print(json.dumps(customer))
else:
print ('No customers found for current user.')
#@title Define the data source in BigQuery:
customer_id = 1 #@param Your ADH customer ID
dataset_id = 1 #@param Your linked ads data customer ID
big_query_project = 'bq_project_id' #@param Destination Project ID {type:"string"}
big_query_dataset = 'dataset_name' #@param Destination Dataset {type:"string"}
big_query_destination_table = "table_name" #@param {type:"string"}
query_name = "query_name" #@param {type:"string"}
#@title Define basic analysis parameters
group_type = 'dv360_insertion_order_id'#@param ["dv360_insertion_order_id", "dv360_line_item_id", "advertiser_id"]
overlap_type = 'impressions'#@param ["clicks", "impressions"]
start_date = '2019-08-01' #@param {type:"string"}
end_date = '2019-09-18' #@param {type:"string"}
#@title Define IOs/LI ids (comma separated) that will be used to select each of the 3 groups. leave it as -1 to exclude a group.
group_1_ids = '10048874, 9939957'#@param {type:"string"}
group_2_ids = '10048146, 9956341'#@param {type:"string"}
group_3_ids = '10048875, 9939959'#@param {type:"string"}
#@title Define friendly names (labels) for each group
group_1_lb = 'upper funnel'#@param {type:"string"}
group_2_lb = 'mid funnel'#@param {type:"string"}
group_3_lb = 'lower funnel'#@param {type:"string"}
if group_1_ids == '-1':
group_1_lb = ''
if group_2_ids == '-1':
group_2_lb = ''
if group_3_ids == '-1':
group_3_lb = ''
#assemble dynamic content dict
dc = {}
dc['group_type'] = group_type
dc['overlap_type'] = overlap_type
dc['start_date'] = start_date
dc['end_date'] = end_date
q1_1 = '''
### Step 1: Create a label for different targeted audiences and impressions
CREATE TABLE interactions_by_user_id_and_g AS (
SELECT
user_id,
SUM(IF(event.{group_type} IN UNNEST(@group_1_ids),1,0)) AS imp_g_1,
SUM(IF(event.{group_type} IN UNNEST(@group_2_ids),1,0)) AS imp_g_2,
SUM(IF(event.{group_type} IN UNNEST(@group_3_ids),1,0)) AS imp_g_3
FROM adh.cm_dt_{overlap_type} as imp
WHERE
event.{group_type} IN UNNEST(ARRAY_CONCAT(@group_1_ids,@group_2_ids,@group_3_ids))
AND
user_id != '0'
GROUP BY 1
);'''
q1_2 = '''
#Part 2 - calculate metrics
SELECT
COUNT(interactions.user_id) AS Unique_Cookies,
SUM(IF(interactions.imp_g_1 > 0 AND interactions.imp_g_2 + interactions.imp_g_3 = 0,1,0)) AS cookies_exclusive_g_1,
SUM(IF(interactions.imp_g_2 > 0 AND interactions.imp_g_1 + interactions.imp_g_3 = 0,1,0)) AS cookies_exclusive_g_2,
SUM(IF(interactions.imp_g_3 > 0 AND interactions.imp_g_1 + interactions.imp_g_2 = 0,1,0)) AS cookies_exclusive_g_3,
SUM(IF(interactions.imp_g_1 > 0 AND interactions.imp_g_2 > 0 AND interactions.imp_g_3 = 0 ,1,0)) AS cookies_g_1_2,
SUM(IF(interactions.imp_g_1 > 0 AND interactions.imp_g_3 > 0 AND interactions.imp_g_2 = 0,1,0)) AS cookies_g_1_3,
SUM(IF(interactions.imp_g_3 > 0 AND interactions.imp_g_2 > 0 AND interactions.imp_g_1 = 0,1,0)) AS cookies_g_2_3,
SUM(IF(interactions.imp_g_1 > 0 AND interactions.imp_g_2 > 0 AND interactions.imp_g_3 > 0 ,1,0)) AS cookies_g_1_2_3,
#3 count total impressions
SUM(interactions.imp_g_1 + interactions.imp_g_2 + interactions.imp_g_3) AS all_impressions,
#4 count total users
SUM(1) AS total_cookies
FROM
tmp.interactions_by_user_id_and_g AS interactions
'''
query_text = (q1_1 + q1_2).format(**dc)
print('Final BigQuery SQL:')
print(query_text)
parameters_type = {
"group_1_ids": {
"defaultValue": {
"value": ""
},
"type": {
"arrayType": {
"type": "INT64"
}
}
},
"group_2_ids": {
"defaultValue": {
"value": ""
},
"type": {
"arrayType": {
"type": "INT64"
}
}
},
"group_3_ids": {
"defaultValue": {
"value": ""
},
"type": {
"arrayType": {
"type": "INT64"
}
}
}
}
try:
full_circle_query = GetService()
except IOError as ex:
print('Unable to create ads data hub service - %s' % ex)
print('Did you specify the client secrets file?')
sys.exit(1)
query_create_body = {
'name': query_name,
'title': query_name,
'queryText': query_text,
'parameterTypes': parameters_type
}
try:
# Execute the request.
new_query = full_circle_query.customers().analysisQueries().create(body=query_create_body, parent='customers/' + str(customer_id)).execute()
new_query_name = new_query["name"]
except HttpError as e:
print(e)
sys.exit(1)
print('New query %s created for customer ID "%s":' % (new_query_name, customer_id))
print(json.dumps(new_query))
destination_table_full_path = big_query_project + '.' + big_query_dataset + '.' + big_query_destination_table
CUSTOMER_ID = customer_id
DATASET_ID = dataset_id
QUERY_NAME = query_name
DEST_TABLE = destination_table_full_path
#Dates
format_str = '%Y-%m-%d' # The format
start_date_obj = datetime.datetime.strptime(start_date, format_str)
end_date_obj = datetime.datetime.strptime(end_date, format_str)
START_DATE = {
"year": start_date_obj.year,
"month": start_date_obj.month,
"day": start_date_obj.day
}
END_DATE = {
"year": end_date_obj.year,
"month": end_date_obj.month,
"day": end_date_obj.day
}
try:
full_circle_query = GetService()
except IOError as ex:
print('Unable to create ads data hub service - %s' % ex)
print('Did you specify the client secrets file?')
sys.exit(1)
query_start_body = {
'spec': {
'adsDataCustomerId': DATASET_ID,
'startDate': START_DATE,
'endDate': END_DATE,
'parameterValues':
{"group_1_ids":
{"value": group_1_ids},
"group_2_ids":
{"value": group_2_ids},
"group_3_ids":
{"value": group_3_ids},
}
},
'destTable': DEST_TABLE,
'customerId': CUSTOMER_ID
}
try:
# Execute the request.
operation = full_circle_query.customers().analysisQueries().start(body=query_start_body, name=new_query_name).execute()
except HttpError as e:
print(e)
sys.exit(1)
print('Running query with name "%s" via the following operation:' % query_name)
print(json.dumps(operation))
import time
statusDone = False
while statusDone is False:
print("waiting for the job to complete...")
updatedOperation = full_circle_query.operations().get(name=operation['name']).execute()
if updatedOperation.get('done'):
statusDone = True
if not statusDone:
time.sleep(5)
print("Job completed... Getting results")
#run bigQuery query
dc = {}
dc['table'] = big_query_dataset + '.' + big_query_destination_table
q1 = '''
select * from {table}
'''.format(**dc)
df1 = pd.io.gbq.read_gbq(q1, project_id=big_query_project, dialect='standard')
print('Total Cookies: ' + str(df1.total_cookies[0]))
from __future__ import division
# define main variables
cookies = {}
#Total
cookies['all'] = df1.total_cookies[0]
cookies['g1'] = df1.cookies_exclusive_g_1[0]
cookies['g2'] = df1.cookies_exclusive_g_2[0]
cookies['g3'] = df1.cookies_exclusive_g_3[0]
cookies['g12'] = df1.cookies_g_1_2[0]
cookies['g13'] = df1.cookies_g_1_3[0]
cookies['g23'] = df1.cookies_g_2_3[0]
cookies['g123'] = df1.cookies_g_1_2_3[0]
#percentage
cookies_p = {}
cookies_p['all'] = 1
cookies_p['g1'] = cookies['g1']/cookies['all']
cookies_p['g2'] = cookies['g2']/cookies['all']
cookies_p['g3'] = cookies['g3']/cookies['all']
cookies_p['g12'] = cookies['g12']/cookies['all']
cookies_p['g13'] = cookies['g13']/cookies['all']
cookies_p['g23'] = cookies['g23']/cookies['all']
cookies_p['g123'] = cookies['g123']/cookies['all']
# define table labels from variables at the start
table_labels = {
'g1':'1 - ' + group_1_lb,
'g2':'2 - ' + group_2_lb,
'g3':'3 - ' + group_3_lb,
'g12':'4 - ' + group_1_lb + ' and ' + group_2_lb,
'g13':'5 - ' + group_1_lb + ' and ' + group_3_lb,
'g23':'6 - ' + group_2_lb + ' and ' + group_3_lb,
'g123':'7 - ' + group_1_lb + ', '+ group_2_lb + ' and ' + group_3_lb,
'all': 'total'
}
#display results in table
def create_df_series(data_dict,labels):
retVal = {}
for key in data_dict:
data = data_dict[key]
label = labels[key]
retVal[label] = data
return retVal
col_cookies_percent = pd.Series(create_df_series(cookies_p,table_labels))
col_cookies = pd.Series(create_df_series(cookies,table_labels))
df_1_summary = pd.DataFrame({'Cookies':col_cookies,'cookies (%)':col_cookies_percent})
df_1_summary
#create diagram image
plt.figure(figsize=(20,15))
plt.title("Cookie Overlap across %s, %s and %s"%(group_1_lb, group_2_lb, group_3_lb))
venn_data_subset = [
cookies_p['g1'],cookies_p['g2'],cookies_p['g12'],
cookies_p['g3'],cookies_p['g13'],cookies_p['g23'],cookies_p['g123']
]
v = venn3(
subsets = venn_data_subset,
set_labels = (group_1_lb, group_2_lb, group_3_lb)
)
#replace diagram labels
def replace_diagram_labels(v):
for i, sl in enumerate(v.subset_labels):
if sl is not None:
sl.set_text(str(round(float(sl.get_text())*100,1))+'%\nof all cookies')
#plot diagram
replace_diagram_labels(v)
plt.show()
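# Sanity check (an added sketch, not part of the original analysis): the seven
# mutually exclusive segments should add up to the total number of cookies,
# since every exposed user falls into exactly one overlap bucket.
segment_sum = sum(cookies[k] for k in ['g1', 'g2', 'g3', 'g12', 'g13', 'g23', 'g123'])
assert segment_sum == cookies['all'], (segment_sum, cookies['all'])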
#@title Define floodlight ids (comma separated) that will be used as conversion
floodlight_activity_ids = '3716682,3714314,3716571,3714314,1399226'#@param
dc['activity_ids'] = floodlight_activity_ids
query_name_2 = 'query_name'#@param
big_query_destination_table_2 = 'table_name'#@param
q2_1 = '''
#Part 1: Get interactions
with interactions AS (
SELECT
user_id,
IF(event.{group_type} IN UNNEST(@group_1_ids),1,0) AS imp_g_1,
IF(event.{group_type} IN UNNEST(@group_2_ids),1,0) AS imp_g_2,
IF(event.{group_type} IN UNNEST(@group_3_ids),1,0) AS imp_g_3,
event.event_time AS interaction_event_time
FROM adh.cm_dt_{overlap_type}
WHERE event.{group_type} IN UNNEST(ARRAY_CONCAT(@group_1_ids,@group_2_ids,@group_3_ids))
AND user_id <> '0' #remove zeroed out ids
),'''
q2_2 = '''
conversions AS (
SELECT
user_id,
event.event_time AS conversion_event_time
FROM adh.cm_dt_activities
WHERE CAST(event.activity_id AS INT64) IN UNNEST(@activity_ids)
AND user_id <> '0'
#GROUP BY 1
),
'''
q2_3 = '''
#define which of these interactions led to a conversion
impressions_and_conversions AS(
SELECT
t0.user_id AS user_id,
t1.conversion_event_time,
t0.imp_g_1 AS imp_g_1,
t0.imp_g_2 AS imp_g_2,
t0.imp_g_3 AS imp_g_3
FROM interactions As t0
LEFT JOIN conversions t1 ON t0.user_id = t1.user_id
WHERE
t1.user_id IS NULL OR
interaction_event_time < conversion_event_time
),
'''
q2_4 = '''
#aggregate interactions per user
results_by_user_id AS (
SELECT
user_id,
COUNT(DISTINCT conversion_event_time) AS conversions,
SUM(imp_g_1) AS imp_g_1,
SUM(imp_g_2) AS imp_g_2,
SUM(imp_g_3) AS imp_g_3
FROM impressions_and_conversions
GROUP BY 1
)
'''
q2_5 = '''
#calculate group overlaps per user
SELECT
#cookie count
SUM(IF(imp_g_1 > 0 AND imp_g_2 + imp_g_3 = 0,1,0)) AS cookies_exclusive_g_1,
SUM(IF(imp_g_2 > 0 AND imp_g_1 + imp_g_3 = 0,1,0)) AS cookies_exclusive_g_2,
SUM(IF(imp_g_3 > 0 AND imp_g_1 + imp_g_2 = 0,1,0)) AS cookies_exclusive_g_3,
SUM(IF(imp_g_1 > 0 AND imp_g_2 > 0 AND imp_g_3 = 0 ,1,0)) AS cookies_g_1_2,
SUM(IF(imp_g_1 > 0 AND imp_g_3 > 0 AND imp_g_2 = 0,1,0)) AS cookies_g_1_3,
SUM(IF(imp_g_3 > 0 AND imp_g_2 > 0 AND imp_g_1 = 0,1,0)) AS cookies_g_2_3,
SUM(IF(imp_g_1 > 0 AND imp_g_2 > 0 AND imp_g_3 > 0 ,1,0)) AS cookies_g_1_2_3,
#conversion count
SUM(IF(imp_g_1 > 0 AND imp_g_2 + imp_g_3 = 0,conversions,0)) AS conversions_exclusive_g_1,
SUM(IF(imp_g_2 > 0 AND imp_g_1 + imp_g_3 = 0,conversions,0)) AS conversions_exclusive_g_2,
SUM(IF(imp_g_3 > 0 AND imp_g_1 + imp_g_2 = 0,conversions,0)) AS conversions_exclusive_g_3,
SUM(IF(imp_g_1 > 0 AND imp_g_2 > 0 AND imp_g_3 = 0 ,conversions,0)) AS conversions_g_1_2,
SUM(IF(imp_g_1 > 0 AND imp_g_3 > 0 AND imp_g_2 = 0,conversions,0)) AS conversions_g_1_3,
SUM(IF(imp_g_3 > 0 AND imp_g_2 > 0 AND imp_g_1 = 0,conversions,0)) AS conversions_g_2_3,
SUM(IF(imp_g_1 > 0 AND imp_g_2 > 0 AND imp_g_3 > 0 ,conversions,0)) AS conversions_g_1_2_3,
#total metrics count
SUM(conversions) AS total_conversions,
COUNT(1) AS total_cookies,
SUM(conversions) / COUNT(1) As total_conversions_per_cookie
FROM results_by_user_id
'''
dc = {}
dc['group_type'] = group_type
dc['overlap_type'] = overlap_type
dc['activity_ids'] = floodlight_activity_ids
q2 = (q2_1 + q2_2 + q2_3 + q2_4 + q2_5).format(**dc)
print('Final BigQuery SQL:')
print(q2)
parameters_type = {
"group_1_ids": {
"defaultValue": {
"value": ""
},
"type": {
"arrayType": {
"type": "INT64"
}
}
},
"group_2_ids": {
"defaultValue": {
"value": ""
},
"type": {
"arrayType": {
"type": "INT64"
}
}
},
"group_3_ids": {
"defaultValue": {
"value": ""
},
"type": {
"arrayType": {
"type": "INT64"
}
}
},
"activity_ids": {
"defaultValue": {
"value": ""
},
"type": {
"arrayType": {
"type": "INT64"
}
}
}
}
try:
full_circle_query = GetService()
except IOError as ex:
print('Unable to create ads data hub service - %s' % ex)
print('Did you specify the client secrets file?')
sys.exit(1)
query_create_body = {
'name': query_name_2,
'title': query_name_2,
'queryText': q2,
'parameterTypes': parameters_type
}
try:
# Execute the request.
new_query = full_circle_query.customers().analysisQueries().create(body=query_create_body, parent='customers/' + str(customer_id)).execute()
new_query_name = new_query["name"]
except HttpError as e:
print(e)
sys.exit(1)
print('New query created for customer ID "%s":' % customer_id)
print(json.dumps(new_query))
destination_table_full_path = big_query_project + '.' + big_query_dataset + '.' + big_query_destination_table_2
CUSTOMER_ID = customer_id
DATASET_ID = dataset_id
QUERY_NAME = query_name
DEST_TABLE = destination_table_full_path
#Dates
format_str = '%Y-%m-%d' # The format
start_date_obj = datetime.datetime.strptime(start_date, format_str)
end_date_obj = datetime.datetime.strptime(end_date, format_str)
START_DATE = {
"year": start_date_obj.year,
"month": start_date_obj.month,
"day": start_date_obj.day
}
END_DATE = {
"year": end_date_obj.year,
"month": end_date_obj.month,
"day": end_date_obj.day
}
try:
full_circle_query = GetService()
except IOError as ex:
print('Unable to create ads data hub service - %s' % ex)
print('Did you specify the client secrets file?')
sys.exit(1)
query_start_body = {
'spec': {
'adsDataCustomerId': DATASET_ID,
'startDate': START_DATE,
'endDate': END_DATE,
'parameterValues':
{"group_1_ids":
{"value": group_1_ids},
"group_2_ids":
{"value": group_2_ids},
"group_3_ids":
{"value": group_3_ids},
"activity_ids":
{"value": floodlight_activity_ids}
}
},
'destTable': DEST_TABLE,
'customerId': CUSTOMER_ID
}
try:
# Execute the request.
operation = full_circle_query.customers().analysisQueries().start(body=query_start_body, name=new_query_name).execute()
except HttpError as e:
print(e)
sys.exit(1)
print('Running query with name "%s" via the following operation:' % query_name)
print(json.dumps(operation))
statusDone = False
while statusDone is False:
print("waiting for the job to complete...")
updatedOperation = full_circle_query.operations().get(name=operation['name']).execute()
if updatedOperation.get('done'):
statusDone = True
if not statusDone:
time.sleep(5)
print("Job completed... Getting results")
#run bigQuery query
dc = {}
dc['table'] = big_query_dataset + '.' + big_query_destination_table_2
q1 = '''
select * from {table}
'''.format(**dc)
df2 = pd.io.gbq.read_gbq(q1, project_id=big_query_project, dialect='standard')
print(dc['table'])
print('Total Cookies: ' + str(df2.total_cookies[0]))
print('Total Conversions: ' + str(df2.total_conversions[0]))
# define main variables
cookies = {}
cookies['all'] = df2.total_cookies[0]
cookies['g1'] = df2.cookies_exclusive_g_1[0]
cookies['g2'] = df2.cookies_exclusive_g_2[0]
cookies['g3'] = df2.cookies_exclusive_g_3[0]
cookies['g12'] = df2.cookies_g_1_2[0]
cookies['g13'] = df2.cookies_g_1_3[0]
cookies['g23'] = df2.cookies_g_2_3[0]
cookies['g123'] = df2.cookies_g_1_2_3[0]
cookies_p = {}
cookies_p['all'] = 1
cookies_p['g1'] = cookies['g1']/cookies['all']
cookies_p['g2'] = cookies['g2']/cookies['all']
cookies_p['g3'] = cookies['g3']/cookies['all']
cookies_p['g12'] = cookies['g12']/cookies['all']
cookies_p['g13'] = cookies['g13']/cookies['all']
cookies_p['g23'] = cookies['g23']/cookies['all']
cookies_p['g123'] = cookies['g123']/cookies['all']
# define table labels
table_labels = {
'g1':'1 - ' + group_1_lb,
'g2':'2 - ' + group_2_lb,
'g3':'3 - ' + group_3_lb,
'g12':'4 - ' + group_1_lb + ' and ' + group_2_lb,
'g13':'5 - ' + group_1_lb + ' and ' + group_3_lb,
'g23':'6 - ' + group_2_lb + ' and ' + group_3_lb,
'g123':'7 - ' + group_1_lb + ', '+ group_2_lb + ' and ' + group_3_lb,
'all': 'total'
}
#display results in table
def create_df_series(data_dict,labels):
retVal = {}
for key in data_dict:
data = data_dict[key]
label = labels[key]
retVal[label] = data
return retVal
col_cookies_percent = pd.Series(create_df_series(cookies_p,table_labels))
col_cookies = pd.Series(create_df_series(cookies,table_labels))
df_2_summary = pd.DataFrame({'Cookies':col_cookies,'cookies (%)':col_cookies_percent})
df_2_summary
#calculate cookie overlap across groups
all_cookies = df2.total_cookies[0]
g1 = round((df2.cookies_exclusive_g_1[0]/all_cookies*100),2)
g2 = round((df2.cookies_exclusive_g_2[0]/all_cookies)*100, 2)
g3 = round((df2.cookies_exclusive_g_3[0]/all_cookies)*100, 2)
g12 = round((df2.cookies_g_1_2[0]/all_cookies)*100, 2)
g13 = round((df2.cookies_g_1_3[0]/all_cookies)*100, 2)
g23 = round((df2.cookies_g_2_3[0]/all_cookies)*100, 2)
g123 = round((df2.cookies_g_1_2_3[0]/all_cookies)*100, 2)
#calculate conversions per cookie metric
all_conversions = df2.total_conversions[0]
g1_conv_user = round((df2.conversions_exclusive_g_1[0]/df2.cookies_exclusive_g_1[0]*100),3)
g2_conv_user = round((df2.conversions_exclusive_g_2[0]/df2.cookies_exclusive_g_2[0]*100),3)
g3_conv_user = round((df2.conversions_exclusive_g_3[0]/df2.cookies_exclusive_g_3[0]*100),3)
g12_conv_user = round((df2.conversions_g_1_2[0]/df2.cookies_g_1_2[0]*100),3)
g13_conv_user = round((df2.conversions_g_1_3[0]/df2.cookies_g_1_3[0]*100),3)
g23_conv_user = round((df2.conversions_g_2_3[0]/df2.cookies_g_2_3[0]*100),3)
g123_conv_user = round((df2.conversions_g_1_2_3[0]/df2.cookies_g_1_2_3[0]*100),3)
conv_per_cookie = [g1_conv_user, g2_conv_user, g12_conv_user, g3_conv_user,g13_conv_user,g23_conv_user,g123_conv_user]
subsets = ['g1', 'g2', 'g12', 'g3', 'g13', 'g23', 'g123']
plt.figure(figsize=(25,15))
plt.title("Cookie Overlap across %s, %s and %s"%(group_1_lb, group_2_lb, group_3_lb))
v = venn3(subsets = (g1, g2, g12, g3, g13, g23, g123), set_labels = (group_1_lb, group_2_lb, group_3_lb))
def replace_diagram_labels(v):
for i, sl in enumerate(v.subset_labels):
if sl is not None:
print(table_labels[subsets[i]] +': '+ sl.get_text()+'% of all cookies. '+str(conv_per_cookie[i])+'% cvr')
sl.set_text(sl.get_text()+'% of all cookies. \n'+str(conv_per_cookie[i])+'% cvr')
replace_diagram_labels(v)
plt.show()
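# Added for context (a sketch, not in the original notebook): the overall
# conversion rate across all exposed cookies, as a baseline against the
# per-segment cvr values printed above.
overall_cvr = round(all_conversions / all_cookies * 100, 3)
print('Overall: %s%% cvr across %s cookies' % (overall_cvr, all_cookies))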
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: API Configuration
Step9: API Authentication - OAuth2.0 Flow
Step10: Actual Request to the Ads Data Hub Service API
Step11: Analysis 1
Step12: 1.2 Analysis Parameters
Step13: 1.3 Funnel groups
Step14: 1.4 Friendly Names
Step15: Step 2
Step16: Create the Query
Step17: Part 2 - calculate metrics
Step18: Put the query together
Step19: Set up group parameters
Step20: Create the Query in ADH
Step21: Check the query exists in ADH
Step22: 2.3 Retrieve the result from BigQuery
Step23: Step 3
Step24: Step 4 - Display the output
Step25: What is the overlap between your groups?
Step26: Step 2
Step27: output example
Step28: output example
Step29: example output
Step30: example output
Step31: example output
Step32: Set the parameter types
Step33: Create the query in ADH
Step34: 2.2
Step35: 2.3 Get the results from BigQuery
Step36: Step 3
Step37: 3.2 Calculate the percentage cookie overlap and conversion rate and format the values
Step38: Step 4
|
4,354
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import functools
import itertools
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
tf.logging.set_verbosity('ERROR')
import tensorflow_datasets as tfds
import tensorflow_hub as hub
try:
from google.colab import data_table
def display_df(df):
return data_table.DataTable(df, include_index=False)
except ModuleNotFoundError:
# If google-colab is not available, just display the raw DataFrame
def display_df(df):
return df
# Use the inner product between two embedding vectors as the similarity measure
def plot_correlation(labels, features):
corr = np.inner(features, features)
corr /= np.max(corr)
sns.heatmap(corr, xticklabels=labels, yticklabels=labels)
with tf.Graph().as_default():
# Load the module
query_input = tf.placeholder(tf.string)
module = hub.Module('https://tfhub.dev/tensorflow/cord-19/swivel-128d/1')
embeddings = module(query_input)
with tf.train.MonitoredTrainingSession() as sess:
# Generate embeddings for some terms
queries = [
# Related viruses
"coronavirus", "SARS", "MERS",
# Regions
"Italy", "Spain", "Europe",
# Symptoms
"cough", "fever", "throat"
]
features = sess.run(embeddings, feed_dict={query_input: queries})
plot_correlation(queries, features)
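# For reference (a minimal sketch, not part of the original notebook): the
# inner-product similarity above coincides with cosine similarity once the
# embedding vectors are unit-normalized, which makes the heatmap scale-independent.
def cosine_similarity_matrix(vectors):
    norms = np.linalg.norm(vectors, axis=1, keepdims=True)  # per-row L2 norms
    unit = vectors / norms
    return np.inner(unit, unit)
print(cosine_similarity_matrix(features).round(2))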
#@title Set up the dataset from TFDS
class Dataset:
"""Build a dataset from a TFDS dataset."""
def __init__(self, tfds_name, feature_name, label_name):
self.dataset_builder = tfds.builder(tfds_name)
self.dataset_builder.download_and_prepare()
self.feature_name = feature_name
self.label_name = label_name
def get_data(self, for_eval):
splits = self.dataset_builder.info.splits
if tfds.Split.TEST in splits:
split = tfds.Split.TEST if for_eval else tfds.Split.TRAIN
else:
SPLIT_PERCENT = 80
split = "train[{}%:]".format(SPLIT_PERCENT) if for_eval else "train[:{}%]".format(SPLIT_PERCENT)
return self.dataset_builder.as_dataset(split=split)
def num_classes(self):
return self.dataset_builder.info.features[self.label_name].num_classes
def class_names(self):
return self.dataset_builder.info.features[self.label_name].names
def preprocess_fn(self, data):
return data[self.feature_name], data[self.label_name]
def example_fn(self, data):
feature, label = self.preprocess_fn(data)
return {'feature': feature, 'label': label}, label
def get_example_data(dataset, num_examples, **data_kw):
"""Show example data."""
with tf.Session() as sess:
batched_ds = dataset.get_data(**data_kw).take(num_examples).map(dataset.preprocess_fn).batch(num_examples)
it = tf.data.make_one_shot_iterator(batched_ds).get_next()
data = sess.run(it)
return data
TFDS_NAME = 'scicite' #@param {type: "string"}
TEXT_FEATURE_NAME = 'string' #@param {type: "string"}
LABEL_NAME = 'label' #@param {type: "string"}
THE_DATASET = Dataset(TFDS_NAME, TEXT_FEATURE_NAME, LABEL_NAME)
#@title Let's take a look at a few labeled examples from the training set
NUM_EXAMPLES = 20 #@param {type:"integer"}
data = get_example_data(THE_DATASET, NUM_EXAMPLES, for_eval=False)
display_df(
pd.DataFrame({
TEXT_FEATURE_NAME: [ex.decode('utf8') for ex in data[0]],
LABEL_NAME: [THE_DATASET.class_names()[x] for x in data[1]]
}))
def preprocessed_input_fn(for_eval):
data = THE_DATASET.get_data(for_eval=for_eval)
data = data.map(THE_DATASET.example_fn, num_parallel_calls=1)
return data
def input_fn_train(params):
data = preprocessed_input_fn(for_eval=False)
data = data.repeat(None)
data = data.shuffle(1024)
data = data.batch(batch_size=params['batch_size'])
return data
def input_fn_eval(params):
data = preprocessed_input_fn(for_eval=True)
data = data.repeat(1)
data = data.batch(batch_size=params['batch_size'])
return data
def input_fn_predict(params):
data = preprocessed_input_fn(for_eval=True)
data = data.batch(batch_size=params['batch_size'])
return data
def model_fn(features, labels, mode, params):
# Embed the text
embed = hub.Module(params['module_name'], trainable=params['trainable_module'])
embeddings = embed(features['feature'])
# Add a linear layer on top
logits = tf.layers.dense(
embeddings, units=THE_DATASET.num_classes(), activation=None)
predictions = tf.argmax(input=logits, axis=1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'logits': logits,
'predictions': predictions,
'features': features['feature'],
'labels': features['label']
})
# Set up a multi-class classification head
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
loss = tf.reduce_mean(loss)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=params['learning_rate'])
train_op = optimizer.minimize(loss, global_step=tf.train.get_or_create_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions)
precision = tf.metrics.precision(labels=labels, predictions=predictions)
recall = tf.metrics.recall(labels=labels, predictions=predictions)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
eval_metric_ops={
'accuracy': accuracy,
'precision': precision,
'recall': recall,
})
#@title Hyperparameters { run: "auto" }
EMBEDDING = 'https://tfhub.dev/tensorflow/cord-19/swivel-128d/1' #@param {type: "string"}
TRAINABLE_MODULE = False #@param {type: "boolean"}
STEPS = 8000#@param {type: "integer"}
EVAL_EVERY = 200 #@param {type: "integer"}
BATCH_SIZE = 10 #@param {type: "integer"}
LEARNING_RATE = 0.01 #@param {type: "number"}
params = {
'batch_size': BATCH_SIZE,
'learning_rate': LEARNING_RATE,
'module_name': EMBEDDING,
'trainable_module': TRAINABLE_MODULE
}
estimator = tf.estimator.Estimator(functools.partial(model_fn, params=params))
metrics = []
for step in range(0, STEPS, EVAL_EVERY):
estimator.train(input_fn=functools.partial(input_fn_train, params=params), steps=EVAL_EVERY)
step_metrics = estimator.evaluate(input_fn=functools.partial(input_fn_eval, params=params))
print('Global step {}: loss {:.3f}, accuracy {:.3f}'.format(step, step_metrics['loss'], step_metrics['accuracy']))
metrics.append(step_metrics)
global_steps = [x['global_step'] for x in metrics]
fig, axes = plt.subplots(ncols=2, figsize=(20,8))
for axes_index, metric_names in enumerate([['accuracy', 'precision', 'recall'],
['loss']]):
for metric_name in metric_names:
axes[axes_index].plot(global_steps, [x[metric_name] for x in metrics], label=metric_name)
axes[axes_index].legend()
axes[axes_index].set_xlabel("Global Step")
predictions = estimator.predict(functools.partial(input_fn_predict, params))
first_10_predictions = list(itertools.islice(predictions, 10))
display_df(
pd.DataFrame({
TEXT_FEATURE_NAME: [pred['features'].decode('utf8') for pred in first_10_predictions],
LABEL_NAME: [THE_DATASET.class_names()[pred['labels']] for pred in first_10_predictions],
'prediction': [THE_DATASET.class_names()[pred['predictions']] for pred in first_10_predictions]
}))
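# Optional follow-up (a sketch assuming the estimator, dataset and params above
# are still in scope): a confusion matrix over the evaluation split gives a
# per-class view of the remaining errors.
all_preds = list(estimator.predict(functools.partial(input_fn_predict, params)))
actual = pd.Series([THE_DATASET.class_names()[p['labels']] for p in all_preds], name='actual')
predicted = pd.Series([THE_DATASET.class_names()[p['predictions']] for p in all_preds], name='predicted')
pd.crosstab(actual, predicted)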
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Explore the TF-Hub CORD-19 Swivel embeddings
Step2: Analyze the embeddings
Step5: We can see that the embeddings capture the meaning of the different terms well. Each word is similar to the other words in its cluster ("coronavirus" is highly related to "SARS" and "MERS"), but differs from words in other clusters (the similarity between "SARS" and "Spain" is close to zero).
Step6: Train a citation intent classifier
Step7: Let's build a model that uses the CORD-19 embeddings with a classification layer on top.
Step8: Train and evaluate the model
Step9: We can see that the loss decreases quickly and, notably, the accuracy rises rapidly. Let's plot some examples to check how the predictions relate to the true labels.
|
4,355
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np  # needed below for np.arange over the link probabilities

popSize = 4 # This is the number of nodes in the network
update = 'BD' # Either 'BD' or 'DB' for Birth-death or death-Birth updating respectively
direction = 'undirected' # Either 'directed' or 'undirected' graphs are used
stepSize = 0.05 # Step size for the probability for each link in the network to be present independently. We used 0.05.
numberOfGraphs = 500 # We used 500
for probLinkConnect in np.arange(0.0,1.0+stepSize,stepSize):
for graph in range(0, numberOfGraphs):
!python randomGraphsNumerical.py $popSize $probLinkConnect $graph $update $direction
!python AmplifierQ.py $popSize $numberOfGraphs $stepSize $update $direction
from Plot import *
Plot(popSize, numberOfGraphs, stepSize, update, direction)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's create the graphs, calculate the fixation probability and store it in one file per graph in the folder output.
Step2: Classify graphs into amplifiers and suppressors
Step3: Plotting
|
4,356
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'sandbox-2', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
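# Illustrative example only (hypothetical value, not from the source notebook):
# DOC.set_value("Free-text overview of the atmospheric chemistry model.")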
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
4,357
|
<ASSISTANT_TASK:>
Python Code:
import turicreate as tc
sf = tc.SFrame.read_csv("/Users/datalab/bigdata/cjc/w15",
header=False)
sf
dir(sf['X1'])
bow = sf['X1']._count_words()
type(sf['X1'])
type(bow)
bow.dict_has_any_keys(['limited'])
bow.dict_values()[0][:20]
sf
sf['bow'] = bow
sf
type(sf['bow'])
len(sf['bow'])
list(sf['bow'][0].items())[:3]
sf['tfidf'] = tc.text_analytics.tf_idf(sf['X1'])
sf
list(sf['tfidf'][0].items())[:5]
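# For intuition (a hedged sketch; Turi Create's exact tf-idf may differ in
# smoothing details): tf-idf scales each word count by log(N / document_frequency),
# so ubiquitous words score near zero while distinctive words are boosted.
import math
num_docs = len(sf)
doc_freq = {}
for d in sf['bow']:
    for w in d:
        doc_freq[w] = doc_freq.get(w, 0) + 1  # number of docs containing each word
example_doc = sf['bow'][0]
{w: example_doc[w] * math.log(num_docs / doc_freq[w]) for w in list(example_doc)[:5]}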
docs = sf['bow'].dict_trim_by_values(2)
docs = docs.dict_trim_by_keys(
tc.text_analytics.stop_words(),
exclude=True)
help(tc.topic_model.create)
help(tc.text_analytics.random_split)
train, test = tc.text_analytics.random_split(docs, .8)
m = tc.topic_model.create(train,
num_topics=100, # number of topics
num_iterations=100, # algorithm parameters
alpha=None, beta=.1) # hyperparameters
results = m.evaluate(test)
print(results['perplexity'])
m
m.get_topics()
help(m.get_topics)
topics = m.get_topics(num_words=10).unstack(['word','score'], \
    new_column_name='topic_words')['topic_words'].apply(lambda x: list(x.keys()))  # list() so Python 3 prints plain lists
for topic in topics:
print(topic)
help(m)
def print_topics(m):
topics = m.get_topics(num_words=5)
topics = topics.unstack(['word','score'], new_column_name='topic_words')['topic_words']
    topics = topics.apply(lambda x: list(x.keys()))  # list() for clean printing on Python 3
for topic in topics:
print(topic)
print_topics(m)
dir(m)
m.vocabulary
m.topics
m2 = tc.topic_model.create(docs,
num_topics=100,
initial_topics=m.topics)
associations = tc.SFrame()
associations['word'] = ['recognition']
associations['topic'] = [0]
m2 = tc.topic_model.create(docs,
num_topics=20,
num_iterations=50,
associations=associations,
verbose=False)
m2.get_topics(num_words=10)
print_topics(m2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download Data
Step2: Transformations
Step3: Text cleaning
Step4: Topic modeling
Step5: pred = m.predict(another_data)
Step6: Seeding the model with prior knowledge
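Step5 above only names the prediction call, so a minimal sketch of it follows; it reuses m and docs from the cells above, while another_data and the output_type keyword should be treated as assumptions about turicreate's topic-model API rather than verified signatures.
# Hedged sketch: scoring held-out documents with the trained topic model `m`.
another_data = docs.tail(5)  # any bag-of-words SArray prepared like `docs`
assignments = m.predict(another_data)  # most likely topic per document
print(assignments)
probs = m.predict(another_data, output_type='probability')  # assumed kwarg: per-topic probabilities
print(probs[0][:10])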
|
4,358
|
<ASSISTANT_TASK:>
Python Code:
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
subjects_dir = data_path + '/subjects'
# Read the forward solutions with surface orientation
fwd = mne.read_forward_solution(fwd_fname)
mne.convert_forward_solution(fwd, surf_ori=True, copy=False)
leadfield = fwd['sol']['data']
print("Leadfield size : %d x %d" % leadfield.shape)
grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False)
picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True)
fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14)
for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']):
im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto',
cmap='RdBu_r')
ax.set_title(ch_type.upper())
ax.set_xlabel('sources')
ax.set_ylabel('sensors')
plt.colorbar(im, ax=ax, cmap='RdBu_r')
plt.show()
plt.figure()
plt.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'],
color=['c', 'b', 'k'])
plt.legend()
plt.title('Normal orientation sensitivity')
plt.xlabel('sensitivity')
plt.ylabel('count')
plt.show()
grad_map.plot(time_label='Gradiometer sensitivity', subjects_dir=subjects_dir,
clim=dict(lims=[0, 50, 100]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Compute sensitivity maps
Step2: Show gain matrix a.k.a. leadfield matrix with sensitivity map
|
4,359
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import math
v = np.array([2,1])
w = 2 * v
print(w)
# Plot w
origin = [0], [0]
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, *w, scale=10)
plt.show()
b = v / 2
print(b)
# Plot b
origin = [0], [0]
plt.axis('equal')
plt.grid()
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.quiver(*origin, *b, scale=10)
plt.show()
import numpy as np
v = np.array([2,1])
s = np.array([-3,2])
d = np.dot(v,s)
print (d)
import numpy as np
v = np.array([2,1])
s = np.array([-3,2])
d = v @ s
print (d)
import math
import numpy as np
# define our vectors
v = np.array([2,1])
s = np.array([-3,2])
# get the magnitudes
vMag = np.linalg.norm(v)
sMag = np.linalg.norm(s)
# calculate the cosine of theta
cos = (v @ s) / (vMag * sMag)
# so theta (in degrees) is:
theta = math.degrees(math.acos(cos))
print(theta)
import numpy as np
p = np.array([2,3,1])
q = np.array([1,2,-2])
r = np.cross(p,q)
print (r)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The same approach is taken for scalar division.
Step2: Dot Product Multiplication
Step3: In Python 3.5 and later, you can also use the @ operator to calculate the dot product
Step4: The Cosine Rule
Step5: Cross Product Multiplication
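As a small companion to the cosine-rule cell above, a slightly more defensive variant is sketched below: for near-(anti)parallel vectors the computed cosine can land a hair outside [-1, 1] through round-off, so clipping it avoids a math domain error. The helper name angle_between is ours, not from the original notebook.
import math
import numpy as np

def angle_between(v, s):
    # clip the cosine into [-1, 1] to guard against floating-point round-off
    cos = np.clip((v @ s) / (np.linalg.norm(v) * np.linalg.norm(s)), -1.0, 1.0)
    return math.degrees(math.acos(cos))

print(angle_between(np.array([2, 1]), np.array([-3, 2])))  # matches the cell above
print(angle_between(np.array([1, 0]), np.array([2, 0])))   # parallel vectors: 0.0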
|
4,360
|
<ASSISTANT_TASK:>
Python Code:
# your code here
current_users = ["lira", "sarah", "james", "nicole", "AJ"]
new_users = ["lira", "sarah", "li", "chidi", "olympia"]
for i in current_users:
if i in new_users:
print("You need to enter a new username")
else:
print("This username is available")
# your code here
def color_probability(texture, color):
    # Return probabilities as floats throughout; the original mixed the
    # strings "0.33"/"0.66" with the int 0, which made comparisons awkward.
    if texture == "smooth":
        if color in ("red", "yellow", "green"):
            return 0.33
        else:
            return 0.0
    elif texture == "bumpy":
        if color == "red":
            return 0.66
        elif color == "yellow":
            return 0.33
        else:
            return 0.0

color_probability("bumpy", "red")
# your code here
age = 65
if age < 2:
print("This person is a baby")
elif (age >= 2 and age < 4):
print("This person is a toddler")
elif (age >= 4 and age < 13):
print("This person is a kid")
elif (age >= 13 and age < 20):
print("This person is a teenager")
elif (age >= 20 and age < 65):
print("This person is an adult")
elif (age >= 65):
print("This person is an elder")
# your code here
nums = [[2, 5], [3, 4], [8, 7]]
def process_data(data):
hold = 1
for i in data:
hold *= i[0]-i[1]
return hold
process_data(nums)
# your code here
people = [["Grae Drake", 98110], ["Bethany Kok"], ["Alex Nussbacher", 94101], ["Darrell Silver", 11201]]
def user_contacts(data):
    users = {}
    for i in data:  # use the parameter, not the global `people`
        if len(i) == 2:
            users[i[0]] = i[1]
        else:
            users[i[0]] = None  # None, not the string "None"
    return users
user_contacts(people)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Color probability
Step2: 3. Write an if-elif-else chain that determines a person’s stage of life.
Step3: 4. process_data
Step4: 5. user_contacts
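For the color-probability exercise, a lookup-table version is sketched below as an alternative to the if/elif chain; the probabilities mirror the ones used above, and the .get default of 0.0 covers every impossible combination in one place.
# Hedged alternative: encode the same probabilities in a nested dict.
PROBS = {
    "smooth": {"red": 0.33, "yellow": 0.33, "green": 0.33},
    "bumpy":  {"red": 0.66, "yellow": 0.33},
}

def color_probability_table(texture, color):
    # Missing texture/color combinations fall through to 0.0.
    return PROBS.get(texture, {}).get(color, 0.0)

print(color_probability_table("bumpy", "red"))    # 0.66
print(color_probability_table("smooth", "blue"))  # 0.0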
|
4,361
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy import signal
from scipy import stats
# check Parseval's theorem holds numerically
nsamps=1000
# window
w = signal.tukey(nsamps,0.1)
a = np.random.normal(0,1,nsamps) * w
A = np.fft.fft(a)
b = (1/np.sqrt(2*np.pi))*(signal.gaussian(nsamps,10))
B = np.fft.fft(b)
c = np.convolve(a,b,'same')
C = np.fft.fft(c)
# signal c is convolution of Gaussian noise (a) with a Gaussian wavelet (b)
# C is the fourier transform of c.
sumt = np.sum(c**2)
sumf = np.sum(np.abs(C)**2)/nsamps
print('time domain',sumt)
print('fourier domain',sumf)
print('difference',np.abs(sumt-sumf))
print('percent', (np.abs(sumt-sumf)/sumt)*100)
AB = A * B
ab = np.fft.ifft(AB)
plt.plot(np.roll(ab,500))
plt.plot(c)
sumAB = np.sum(np.abs(A**2*B**2))/nsamps
print('sum A*B',sumAB)
print('difference',np.abs(sumt-sumAB))
print('percent',(np.abs(sumt-sumAB)/sumt)*100)
def ndf(y,taper=True,detrend=True):
    """Uses the improvement found by Walsh et al (2013).
    By default will detrend data to ensure zero mean
    and will taper edges using a Tukey filter affecting amplitudes of 5% of data at edges.
    """
if taper is True:
y = y * signal.tukey(y.size,0.05)
if detrend is True:
# ensure no trend on the noise trace
y = signal.detrend(y)
Y = np.fft.fft(y)
amp = np.absolute(Y)
# estimate E2 and E4 following Walsh et al (2013)
a = np.ones(Y.size)
a[0] = a[-1] = 0.5
E2 = np.sum( a * amp**2)
E4 = (np.sum( (4 * a**2 / 3) * amp**4))
ndf = 2 * ( 2 * E2**2 / E4 - 1 )
return ndf
def ndf2(y,taper=True,detrend=True):
if taper is True:
y = y * signal.tukey(y.size,0.05)
if detrend is True:
# ensure no trend on the noise trace
y = signal.detrend(y)
Y = np.fft.fft(y)
amp = np.absolute(Y)**2
E2 = np.sum(amp**2)
E4 = (np.sum( (4/3) * amp**4))
ndf = 2 * ( 2 * E2**2 / E4 - 1 )
return ndf
print(ndf(c))
print(ndf2(c))
stats.moment(c,moment=4)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parseval's theorem, when applied to the discrete Fourier transform, looks like this.
Step2: Furthermore, by the convolution theorem
Step5: Parseval's theorem as applied in Silver and Chan (and Walsh).
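The equations that Step1, Step2, and Step5 refer to appear to have been stripped during extraction; for reference, the discrete Parseval identity that the first code cell checks numerically (sumf = np.sum(np.abs(C)**2)/nsamps) is, in NumPy's FFT normalization,

$$\sum_{n=0}^{N-1} |c_n|^2 = \frac{1}{N} \sum_{k=0}^{N-1} |C_k|^2,$$

and the convolution theorem used in the second cell is $\mathcal{F}\{a * b\} = A \cdot B$, so the same energy can equivalently be written as $\frac{1}{N}\sum_k |A_k|^2 |B_k|^2$, which is what sumAB computes.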
|
4,362
|
<ASSISTANT_TASK:>
Python Code:
from google.cloud import aiplatform as vertex_ai
!python -c "import tensorflow as tf; print(f'TF version: {tf.__version__}')"
!python -c "import tfx; print(f'TFX version: {tfx.__version__}')"
!python -c "import kfp; print(f'KFP version: {kfp.__version__}')"
print(f"vertex_ai: {vertex_ai.__version__}")
%cd pipeline_vertex
!ls -la
# TODO: Set your environment resource settings here for GCP_REGION, ARTIFACT_STORE_URI, ENDPOINT, and CUSTOM_SERVICE_ACCOUNT.
REGION = "us-central1"
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
ARTIFACT_STORE = f"gs://{PROJECT_ID}"
# Set your resource settings as environment variables. These override the default values in pipeline/config.py.
%env REGION={REGION}
%env ARTIFACT_STORE={ARTIFACT_STORE}
%env PROJECT_ID={PROJECT_ID}
!gsutil ls | grep ^{ARTIFACT_STORE}/$ || gsutil mb -l {REGION} {ARTIFACT_STORE}
PIPELINE_NAME = "tfxcovertype"
DATA_ROOT_URI = f"gs://{PROJECT_ID}/data/tfxcovertype"
TFX_IMAGE_URI = f"gcr.io/{PROJECT_ID}/{PIPELINE_NAME}"
PIPELINE_JSON = f"{PIPELINE_NAME}.json"
TRAIN_STEPS = 10
EVAL_STEPS = 5
%env PIPELINE_NAME={PIPELINE_NAME}
%env DATA_ROOT_URI={DATA_ROOT_URI}
%env TFX_IMAGE_URI={TFX_IMAGE_URI}
%env PIPELINE_JSON={PIPELINE_JSON}
%env TRAIN_STEPS={TRAIN_STEPS}
%env EVAL_STEPS={EVAL_STEPS}
!gsutil cp ../../../data/* $DATA_ROOT_URI/dataset.csv
!gsutil ls $DATA_ROOT_URI/*
!gcloud builds submit --timeout 15m --tag $TFX_IMAGE_URI .
!tfx pipeline compile --engine vertex --pipeline_path runner.py
vertex_ai.init(project=PROJECT_ID, location=REGION)
pipeline = vertex_ai.PipelineJob(
display_name="tfxcovertype4",
template_path=PIPELINE_JSON,
enable_caching=False,
)
pipeline.run()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Validate lab package version installation
Step2: Note
Step3: The config.py module configures the default values for the environment specific settings and the default values for the pipeline runtime parameters.
Step4: Set the compile time settings to first create a pipeline version without hyperparameter tuning
Step5: Let us populate the data bucket at DATA_ROOT_URI
Step6: Let us build and push the TFX container image described in the Dockerfile
Step7: Compile your pipeline code
Step8: Note
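As a hedged aside to the final cell: pipeline.run() as written blocks the notebook until the run completes. A non-blocking variant is sketched below; the sync keyword, state property, and wait() method follow our reading of the Vertex AI SDK's PipelineJob interface, so verify them against the SDK version in use.
# Hedged sketch: submit without blocking, then poll.
pipeline.run(sync=False)  # returns once the job is submitted (assumed kwarg)
print(pipeline.state)     # assumed property, e.g. PIPELINE_STATE_RUNNING
pipeline.wait()           # block until the run finishes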
|
4,363
|
<ASSISTANT_TASK:>
Python Code:
from games import (GameState, Game, Fig52Game, TicTacToe, query_player, random_player,
alphabeta_player, play_game, minimax_decision, alphabeta_full_search,
alphabeta_search, Canvas_TicTacToe)
%psource Game
%psource TicTacToe
game52 = Fig52Game()
print(random_player(game52, 'A'))
print(random_player(game52, 'A'))
print( alphabeta_player(game52, 'A') )
print( alphabeta_player(game52, 'B') )
print( alphabeta_player(game52, 'C') )
minimax_decision('A', game52)
alphabeta_full_search('A', game52)
play_game(game52, alphabeta_player, alphabeta_player)
play_game(game52, alphabeta_player, random_player)
#play_game(game52, query_player, alphabeta_player)
#play_game(game52, alphabeta_player, query_player)
ttt = TicTacToe()
ttt.display(ttt.initial)
my_state = GameState(
to_move = 'X',
    utility = 0,  # utility should be numeric; the original string '0' would break comparisons
board = {(1,1): 'X', (1,2): 'O', (1,3): 'X',
(2,1): 'O', (2,3): 'O',
(3,1): 'X',
},
moves = [(2,2), (3,2), (3,3)]
)
ttt.display(my_state)
random_player(ttt, my_state)
random_player(ttt, my_state)
alphabeta_player(ttt, my_state)
print(play_game(ttt, random_player, alphabeta_player))
for _ in range(10):
print(play_game(ttt, alphabeta_player, alphabeta_player))
for _ in range(10):
print(play_game(ttt, random_player, alphabeta_player))
bot_play = Canvas_TicTacToe('bot_play', 'random', 'alphabeta')
rand_play = Canvas_TicTacToe('rand_play', 'human', 'random')
ab_play = Canvas_TicTacToe('ab_play', 'human', 'alphabeta')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: GameState namedtuple
Step2: Now let's get into details of all the methods in our Game class. You have to implement these methods when you create new classes that would represent your game.
Step3: The class TicTacToe inherits from the class Game. As mentioned earlier, you really want to do this: catching bugs and errors becomes a whole lot easier.
Step4: First we try out our random_player(game, state). Given a game state it will give us a random move every time
Step5: The alphabeta_player(game, state) will always give us the best move possible
Step6: What the alphabeta_player does is, it simply calls the method alphabeta_full_search. They both are essentially the same. In the module, both alphabeta_full_search and minimax_decision have been implemented. They both do the same job and return the same thing, which is, the best move in the current state. It's just that alphabeta_full_search is more efficient w.r.t time because it prunes the search tree and hence, explores lesser number of states.
Step7: Demonstrating the play_game function on the game52
Step8: Note that, here, if you are the first player, the alphabeta_player plays as MIN, and if you are the second player, the alphabeta_player plays as MAX. This happens because that's the way the game is defined in the class Fig52Game. Having a look at the code of this class should make it clear.
Step9: We can print a state using the display method
Step10: Hmm, so that's the initial state of the game; no X's and no O's.
Step11: So, how does this game state looks like?
Step12: The random_player will behave how he is supposed to i.e. pseudo-randomly
Step13: But the alphabeta_player will always give the best move, as expected
Step14: Now let's make 2 players play against each other. We use the play_game function for this. The play_game function makes players play the match against each other and returns the utility for the first player, of the terminal state reached when the game ends. Hence, for our TicTacToe game, if we get the output +1, the first player wins, -1 if the second player wins, and 0 if the match ends in a draw.
Step15: The output is -1, hence random_player loses implies alphabeta_player wins.
Step16: A random_player should never win against an alphabeta_player. Let's test that.
Step17: Canvas_TicTacToe(Canvas)
Step18: Now, let's play a game ourselves against a random_player
Step19: Yay! We win. But we cannot win against an alphabeta_player, however hard we try.
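Step7 claims alphabeta_full_search does the same job as minimax_decision but faster thanks to pruning; a quick hedged check on the small Fig52Game is sketched below, using only names already imported in the code above (absolute timings will vary by machine).
import timeit

# Both should return the same best move for state 'A'; alpha-beta just prunes.
print(minimax_decision('A', game52), alphabeta_full_search('A', game52))
print('minimax:   %.4fs' % timeit.timeit(lambda: minimax_decision('A', game52), number=1000))
print('alphabeta: %.4fs' % timeit.timeit(lambda: alphabeta_full_search('A', game52), number=1000))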
|
4,364
|
<ASSISTANT_TASK:>
Python Code:
from BioTechTopics import Topics
from plotBokehJpnb2 import plotBokehInJpnb
import time
# make instance of Topics object and load the data
t=Topics()
t.load() # unpickles LDA, tf, and tf-idf representations, puts text data from JSON into pandas dataframe
#plotBokehInJpnb(t,'antibody')
start = time.time()
# these next two lines allow you submit a query to the algorithm
t.ww2('antibody') # Who's who? function - does information retrieval
data_scatter_dict = t.formatSearchResults(format='tfidf_tf_product',return_top_n=200) #user can format data in various ways
end = time.time()
print 'Query took ' + str(end-start) + ' seconds to execute'
print 'Some hits:'
print [str(data_scatter_dict['keywords'][x]) for x in [0,11,20]]
#plotBokehInJpnb(t,'digital health')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Section 1
Step2: Fast named entity return is possible because all NLP (TextRank, tfidf, named entity recognition) is done offline and stored in pickle or JSON format and loaded later.
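That offline-precompute-plus-pickle design is the whole reason queries return in well under a second. A generic sketch of the pattern follows; the cache filename and build_fn are illustrative placeholders, not part of BioTechTopics.
import os
import pickle

def load_or_build(build_fn, cache_path='nlp_artifacts.pkl'):
    # Load precomputed NLP artifacts if cached; otherwise build once and cache.
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as f:
            return pickle.load(f)
    artifacts = build_fn()  # the expensive offline step (tf-idf, LDA, NER, ...)
    with open(cache_path, 'wb') as f:
        pickle.dump(artifacts, f)
    return artifacts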
|
4,365
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'csir-csiro', 'sandbox-2', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
4,366
|
<ASSISTANT_TASK:>
Python Code:
%%bash
pushd /workspace/nvidia-examples/tensorrt/tftrt/examples/object_detection/
bash ../helper_scripts/install_pycocotools.sh;
popd
import os
os.environ['CUDA_VISIBLE_DEVICES']='0'
import time
import logging
import numpy as np
import tensorflow as tf
print("TensorFlow version: ", tf.__version__)
from tensorflow.python.compiler.tensorrt import trt_convert as trt
from tensorflow.python.saved_model import tag_constants
logging.getLogger("tensorflow").setLevel(logging.ERROR)
# check TensorRT version
print("TensorRT version: ")
!dpkg -l | grep nvinfer
def get_files(data_dir, filename_pattern):
if data_dir == None:
return []
files = tf.io.gfile.glob(os.path.join(data_dir, filename_pattern))
if files == []:
raise ValueError('Can not find any files in {} with '
'pattern "{}"'.format(data_dir, filename_pattern))
return files
VALIDATION_DATA_DIR = "/data"
validation_files = get_files(VALIDATION_DATA_DIR, 'validation*')
print('There are %d validation files. \n%s\n%s\n...'%(len(validation_files), validation_files[0], validation_files[-1]))
%%bash
FILE=/saved_model/resnet_v1_50_2016_08_28.tar.gz
if [ -f $FILE ]; then
echo "The file '$FILE' exists."
else
echo "The file '$FILE' in not found. Downloading..."
wget -P /saved_model/ http://download.tensorflow.org/models/official/20181001_resnet/savedmodels/resnet_v1_fp32_savedmodel_NHWC.tar.gz
fi
tar -xzvf /saved_model/resnet_v1_fp32_savedmodel_NHWC.tar.gz -C /saved_model
def deserialize_image_record(record):
feature_map = {
'image/encoded': tf.io.FixedLenFeature([ ], tf.string, ''),
'image/class/label': tf.io.FixedLenFeature([1], tf.int64, -1),
'image/class/text': tf.io.FixedLenFeature([ ], tf.string, ''),
'image/object/bbox/xmin': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.io.VarLenFeature(dtype=tf.float32)
}
with tf.name_scope('deserialize_image_record'):
obj = tf.io.parse_single_example(record, feature_map)
imgdata = obj['image/encoded']
label = tf.cast(obj['image/class/label'], tf.int32)
bbox = tf.stack([obj['image/object/bbox/%s'%x].values
for x in ['ymin', 'xmin', 'ymax', 'xmax']])
bbox = tf.transpose(tf.expand_dims(bbox, 0), [0,2,1])
text = obj['image/class/text']
return imgdata, label, bbox, text
from preprocessing import vgg_preprocess as vgg_preprocessing
def preprocess(record):
# Parse TFRecord
imgdata, label, bbox, text = deserialize_image_record(record)
#label -= 1 # Change to 0-based if not using background class
try: image = tf.image.decode_jpeg(imgdata, channels=3, fancy_upscaling=False, dct_method='INTEGER_FAST')
except: image = tf.image.decode_png(imgdata, channels=3)
image = vgg_preprocessing(image, 224, 224)
return image, label
#Define some global variables
BATCH_SIZE = 64
SAVED_MODEL_DIR = "/saved_model/resnet_v1_fp32_savedmodel_NHWC/1538686669/"
!saved_model_cli show --all --dir $SAVED_MODEL_DIR
INPUT_TENSOR = 'input_tensor:0'
OUTPUT_TENSOR = 'softmax_tensor:0'
def benchmark_saved_model(SAVED_MODEL_DIR, BATCH_SIZE=64):
# load saved model
saved_model_loaded = tf.saved_model.load(SAVED_MODEL_DIR, tags=[tag_constants.SERVING])
signature_keys = list(saved_model_loaded.signatures.keys())
print(signature_keys)
infer = saved_model_loaded.signatures['serving_default']
print(infer.structured_outputs)
# prepare dataset iterator
dataset = tf.data.TFRecordDataset(validation_files)
dataset = dataset.map(map_func=preprocess, num_parallel_calls=20)
dataset = dataset.batch(batch_size=BATCH_SIZE, drop_remainder=True)
print('Warming up for 50 batches...')
cnt = 0
for x, y in dataset:
labeling = infer(x)
cnt += 1
if cnt == 50:
break
print('Benchmarking inference engine...')
num_hits = 0
num_predict = 0
start_time = time.time()
for x, y in dataset:
labeling = infer(x)
preds = labeling['classes'].numpy()
        num_hits += np.sum(preds.flatten() == y.numpy().flatten())  # flatten both so (N,1) labels don't broadcast against (N,) preds
num_predict += preds.shape[0]
print('Accuracy: %.2f%%'%(100*num_hits/num_predict))
print('Inference speed: %.2f samples/s'%(num_predict/(time.time()-start_time)))
benchmark_saved_model(SAVED_MODEL_DIR, BATCH_SIZE=BATCH_SIZE)
FP32_SAVED_MODEL_DIR = SAVED_MODEL_DIR+"_TFTRT_FP32/1"
!rm -rf $FP32_SAVED_MODEL_DIR
conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
precision_mode=trt.TrtPrecisionMode.FP32)
converter = trt.TrtGraphConverterV2(
input_saved_model_dir=SAVED_MODEL_DIR,
conversion_params=conversion_params)
converter.convert()
converter.save(FP32_SAVED_MODEL_DIR)
benchmark_saved_model(FP32_SAVED_MODEL_DIR, BATCH_SIZE=BATCH_SIZE)
FP16_SAVED_MODEL_DIR = SAVED_MODEL_DIR+"_TFTRT_FP16/1"
!rm -rf $FP16_SAVED_MODEL_DIR
conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
precision_mode=trt.TrtPrecisionMode.FP16)
converter = trt.TrtGraphConverterV2(
input_saved_model_dir=SAVED_MODEL_DIR,
conversion_params=conversion_params)
converter.convert()
converter.save(FP16_SAVED_MODEL_DIR)
benchmark_saved_model(FP16_SAVED_MODEL_DIR, BATCH_SIZE=BATCH_SIZE)
num_calibration_batches = 2
# prepare calibration dataset
dataset = tf.data.TFRecordDataset(validation_files)
dataset = dataset.map(map_func=preprocess, num_parallel_calls=20)
dataset = dataset.batch(batch_size=BATCH_SIZE, drop_remainder=True)
calibration_dataset = dataset.take(num_calibration_batches)
def calibration_input_fn():
for x, y in calibration_dataset:
yield (x, )
# set a directory to write the saved model
INT8_SAVED_MODEL_DIR = SAVED_MODEL_DIR + "_TFTRT_INT8/1"
!rm -rf $INT8_SAVED_MODEL_DIR
conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
precision_mode=trt.TrtPrecisionMode.INT8)
converter = trt.TrtGraphConverterV2(
input_saved_model_dir=SAVED_MODEL_DIR,
conversion_params=conversion_params)
converter.convert(calibration_input_fn=calibration_input_fn)
converter.save(INT8_SAVED_MODEL_DIR)
benchmark_saved_model(INT8_SAVED_MODEL_DIR, BATCH_SIZE=BATCH_SIZE)
!saved_model_cli show --all --dir $INT8_SAVED_MODEL_DIR
data_directory = "/data/Calibration_data"
calibration_files = [os.path.join(path, name) for path, _, files in os.walk(data_directory) for name in files]
print('There are %d calibration files. \n%s\n%s\n...'%(len(calibration_files), calibration_files[0], calibration_files[-1]))
def parse_file(filepath):
image = tf.io.read_file(filepath)
image = tf.image.decode_jpeg(image, channels=3)
image = vgg_preprocessing(image, 224, 224)
return image
num_calibration_batches = 2
# prepare calibration dataset
dataset = tf.data.Dataset.from_tensor_slices(calibration_files)
dataset = dataset.map(map_func=parse_file, num_parallel_calls=20)
dataset = dataset.batch(batch_size=BATCH_SIZE)
dataset = dataset.repeat(None)
calibration_dataset = dataset.take(num_calibration_batches)
def calibration_input_fn():
for x in calibration_dataset:
yield (x, )
# set a directory to write the saved model
INT8_SAVED_MODEL_DIR = SAVED_MODEL_DIR + "_TFTRT_INT8/2"
!rm -rf $INT8_SAVED_MODEL_DIR
conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
precision_mode=trt.TrtPrecisionMode.INT8)
converter = trt.TrtGraphConverterV2(
input_saved_model_dir=SAVED_MODEL_DIR,
conversion_params=conversion_params)
converter.convert(calibration_input_fn=calibration_input_fn)
converter.save(INT8_SAVED_MODEL_DIR)
benchmark_saved_model(INT8_SAVED_MODEL_DIR)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: TF saved model
Step3: Helper functions
Step4: <a id="2"></a>
Step5: We employ saved_model_cli to inspect the inputs and outputs of the model.
Step6: This give us information on the input and output tensors as input_tensor
Step7: Next, we define a function to read in a saved mode, measuring its speed and accuracy on the validation data.
Step8: <a id="3"></a>
Step9: <a id="4"></a>
Step10: <a id="5"></a>
Step11: Step 2
Step12: Benchmarking INT8 saved model
Step13: <a id="6"></a>
Step14: We define a helper function to read and preprocess image from JPEG file.
Step15: Next, we proceed with the two-stage process of creating and calibrating TFTRT INT8 model.
Step16: As before, we can benchmark the speed and accuracy of the resulting model.
|
4,367
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import mxnet as mx
from mxnet import nd, autograd
import numpy as np
from collections import defaultdict
mx.random.seed(1)
# ctx = mx.gpu(0)
ctx = mx.cpu(0)
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import scipy.fftpack
from pandas.tools import plotting
from pandas.tools.plotting import autocorrelation_plot
from datetime import datetime
sns.set_style('whitegrid')
#sns.set_context('notebook')
sns.set_context('poster')
# Make inline plots vector graphics instead of raster graphics
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('pdf', 'png')
def get_list_unique_block_indices(len_data=100, seq_length=5, n_samples=10):
    """Return a list of unique random ints that serve as the index of the
    first element of a block of data.

    args:
        len_data (int): length of the data set
        seq_length (int): length of the blocks to extract
        n_samples (int): # of blocks to extract
    """
set1 = set(np.random.randint(len_data // seq_length, size=n_samples)*seq_length)
full_set = set1
while len(full_set) < n_samples:
set2 = set(np.random.randint(len_data // seq_length, size=n_samples)*seq_length)
full_set = full_set | set2
returned_list = list(full_set)[0:n_samples]
assert(len(returned_list) == n_samples)
return returned_list
def extract_random_sequence(data, seq_length=5, block_start_index=None):
columns_subset = ['car.count', 'day_of_week_int', 'cloudy_or_not_cloudy', 'weather', 'current_month']
if block_start_index is None:
block_start_index = np.random.randint(len(data)-seq_length)
data_subset = data.reset_index().loc[block_start_index:block_start_index+seq_length-1, columns_subset]
assert(len(data_subset) == (seq_length))
out_data = [list(i) for i in data_subset.values]
return out_data
def create_batch_ND_time_series(full_data, seq_length=10, num_samples=4):
out_data = []
# get a list of non-overlapping random sequence start indices
all_samples_start_indices = get_list_unique_block_indices(len(full_data), seq_length, num_samples)
assert(len(all_samples_start_indices) == num_samples)
for one_random_start_index in all_samples_start_indices:
out_data.append(extract_random_sequence(full_data, seq_length, one_random_start_index))
assert(len(out_data[-1]) == (seq_length))
return out_data
# OI data
original_data = pd.read_csv("../data/timeseries/data.csv", index_col=0)
dict_days_to_int = {'Monday': 1, 'Tuesday': 2, 'Wednesday': 3, 'Thursday': 4, 'Friday': 5, 'Saturday': 6, 'Sunday': 7}
original_data['date_']=original_data.index
original_data['current_month'] = original_data['date_'].apply(lambda x: pd.Timestamp(x).month)
original_data['day_of_week_int'] = original_data['day.of.week'].apply(lambda x: dict_days_to_int[x])
original_data['cloudy_or_not_cloudy'] = original_data['cloud.indicator'].apply(lambda x: 0 if x=='clear' else 1)
full_data = pd.DataFrame()
#############################
# JUST A RDM SAMPLE FOR NOW /!\ /!\ /!\ /!\ /!\ /!\ /!\ careful as it shuffles the time order!
#############################
full_data = original_data#.sample(1500)
# filter out cloudy data!
full_data = full_data[full_data['cloudy_or_not_cloudy']==0]
SEQ_LENGTH = 2
NUM_FEATURES = 5
# let's divide data in train (75%), dev (15%), test (10%)
# in sequences of 5 days (SEQ_LENGTH = 5)
full_data_length = len(full_data)
# the actual length of extracted sequence is SEQ_LENGTH + 1 so that we can do the shift of +1 for labels
total_num_of_sequences = full_data_length // (SEQ_LENGTH+1) - 1
# the length of extracted sequence is SEQ_LENGTH so that we can do the shift of +1 for labels
all_random_sequences = create_batch_ND_time_series(full_data, seq_length=SEQ_LENGTH+1, num_samples=total_num_of_sequences)
n_seq_train = int(total_num_of_sequences*0.75)
n_seq_dev = int(total_num_of_sequences*0.9) - int(total_num_of_sequences*0.75)
n_seq_test = len(all_random_sequences) - int(total_num_of_sequences*0.9)
data_train = np.array(all_random_sequences[0:n_seq_train])
data_dev = np.array(all_random_sequences[n_seq_train:n_seq_train+n_seq_dev])
data_test = np.array(all_random_sequences[n_seq_train+n_seq_dev:])
print('SHAPES of ALL, TRAIN, DEV, TEST:')
print(np.array(all_random_sequences).shape)
print(np.array(data_train).shape)
print(np.array(data_dev).shape)
print(np.array(data_test).shape)
assert(data_train.shape == (n_seq_train, SEQ_LENGTH+1, NUM_FEATURES))
assert(data_dev.shape == (n_seq_dev, SEQ_LENGTH+1, NUM_FEATURES))
assert(data_test.shape == (n_seq_test, SEQ_LENGTH+1, NUM_FEATURES))
# num_sampling_points = min(SEQ_LENGTH, 400)
# (data_train.sample(4).transpose().iloc[range(0, SEQ_LENGTH, SEQ_LENGTH//num_sampling_points)]).plot()
# print (data_train)
# print(data_train[:, :-1, :]) # inputs
# batch_size = 5
# num_batches_train = data_train.shape[0] // batch_size
# print(num_batches_train)
# print ( nd.array(data_train[:, :-1, :]).reshape((num_batches_train, 5, SEQ_LENGTH-1, NUM_FEATURES)) )
batch_size = 32
batch_size_test = 1
seq_length = SEQ_LENGTH
num_batches_train = data_train.shape[0] // batch_size
num_batches_test = data_test.shape[0] // batch_size_test
num_features = NUM_FEATURES # we do 1D time series for now, this is like vocab_size = 1 for characters
# inputs are from t0 to t_seq_length - 1. because the last point is kept for the
# output ("label") of the penultimate point
data_train_inputs = data_train[:, :-1, :]
data_train_labels = data_train[:, 1:, :]
data_test_inputs = data_test[:, :-1, :]
data_test_labels = data_test[:, 1:, :]
train_data_inputs = nd.array(data_train_inputs).reshape((num_batches_train, batch_size, seq_length, num_features))
train_data_labels = nd.array(data_train_labels).reshape((num_batches_train, batch_size, seq_length, num_features))
test_data_inputs = nd.array(data_test_inputs).reshape((num_batches_test, batch_size_test, seq_length, num_features))
test_data_labels = nd.array(data_test_labels).reshape((num_batches_test, batch_size_test, seq_length, num_features))
train_data_inputs = nd.swapaxes(train_data_inputs, 1, 2)
train_data_labels = nd.swapaxes(train_data_labels, 1, 2)
test_data_inputs = nd.swapaxes(test_data_inputs, 1, 2)
test_data_labels = nd.swapaxes(test_data_labels, 1, 2)
print('num_mini-batches_train={0} | seq_length={2} | mini-batch_size={1} | num_features={3}'.format(num_batches_train, batch_size, seq_length, num_features))
print('train_data_inputs shape: ', train_data_inputs.shape)
print('train_data_labels shape: ', train_data_labels.shape)
# print(data_train_inputs.values)
# print(train_data_inputs[0]) # see what one batch looks like
num_inputs = NUM_FEATURES # for a 1D time series, this is just a scalar equal to 1.0
num_outputs = NUM_FEATURES # same comment
num_hidden_units = [64] # num of hidden units in each hidden LSTM layer
num_hidden_layers = len(num_hidden_units) # num of hidden LSTM layers
num_units_layers = [num_features] + num_hidden_units
########################
# Weights connecting the inputs to the hidden layer
########################
Wxg, Wxi, Wxf, Wxo, Whg, Whi, Whf, Who, bg, bi, bf, bo = {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}
for i_layer in range(1, num_hidden_layers+1):
num_inputs = num_units_layers[i_layer-1]
num_hidden_units = num_units_layers[i_layer]
Wxg[i_layer] = nd.random_normal(shape=(num_inputs,num_hidden_units), ctx=ctx) * .01
Wxi[i_layer] = nd.random_normal(shape=(num_inputs,num_hidden_units), ctx=ctx) * .01
Wxf[i_layer] = nd.random_normal(shape=(num_inputs,num_hidden_units), ctx=ctx) * .01
Wxo[i_layer] = nd.random_normal(shape=(num_inputs,num_hidden_units), ctx=ctx) * .01
########################
# Recurrent weights connecting the hidden layer across time steps
########################
Whg[i_layer] = nd.random_normal(shape=(num_hidden_units, num_hidden_units), ctx=ctx) * .01
Whi[i_layer] = nd.random_normal(shape=(num_hidden_units, num_hidden_units), ctx=ctx) * .01
Whf[i_layer] = nd.random_normal(shape=(num_hidden_units, num_hidden_units), ctx=ctx) * .01
Who[i_layer] = nd.random_normal(shape=(num_hidden_units, num_hidden_units), ctx=ctx) * .01
########################
# Bias vector for hidden layer
########################
bg[i_layer] = nd.random_normal(shape=num_hidden_units, ctx=ctx) * .01
bi[i_layer] = nd.random_normal(shape=num_hidden_units, ctx=ctx) * .01
bf[i_layer] = nd.random_normal(shape=num_hidden_units, ctx=ctx) * .01
bo[i_layer] = nd.random_normal(shape=num_hidden_units, ctx=ctx) * .01
########################
# Weights to the output nodes
########################
Why = nd.random_normal(shape=(num_units_layers[-1], num_outputs), ctx=ctx) * .01
by = nd.random_normal(shape=num_outputs, ctx=ctx) * .01
params = []
for i_layer in range(1, num_hidden_layers+1):
params += [Wxg[i_layer], Wxi[i_layer], Wxf[i_layer], Wxo[i_layer], Whg[i_layer], Whi[i_layer], Whf[i_layer], Who[i_layer], bg[i_layer], bi[i_layer], bf[i_layer], bo[i_layer]]
params += [Why, by] # add the output layer
for param in params:
param.attach_grad()
def softmax(y_linear, temperature=1.0):
lin = (y_linear-nd.max(y_linear)) / temperature
exp = nd.exp(lin)
partition = nd.sum(exp, axis=0, exclude=True).reshape((-1,1))
return exp / partition
def cross_entropy(yhat, y):
return - nd.mean(nd.sum(y * nd.log(yhat), axis=0, exclude=True))
def rmse(yhat, y):
return nd.mean(nd.sqrt(nd.sum(nd.power(y - yhat, 2), axis=0, exclude=True)))
def average_ce_loss(outputs, labels):
assert(len(outputs) == len(labels))
total_loss = 0.
for (output, label) in zip(outputs,labels):
total_loss = total_loss + cross_entropy(output, label)
return total_loss / len(outputs)
def average_rmse_loss(outputs, labels):
assert(len(outputs) == len(labels))
total_loss = 0.
for (output, label) in zip(outputs,labels):
total_loss = total_loss + rmse(output, label)
return total_loss / len(outputs)
# note: ValueError is a builtin, so the Python 2-only "from exceptions import ValueError" is not needed
def SGD(params, learning_rate):
for param in params:
# print('grrrrr: ', param.grad)
param[:] = param - learning_rate * param.grad
def adam(params, learning_rate, M , R, index_adam_call, beta1, beta2, eps):
k = -1
for param in params:
k += 1
M[k] = beta1 * M[k] + (1. - beta1) * param.grad
R[k] = beta2 * R[k] + (1. - beta2) * (param.grad)**2
# bias correction since we initilized M & R to zeros, they're biased toward zero on the first few iterations
m_k_hat = M[k] / (1. - beta1**(index_adam_call))
r_k_hat = R[k] / (1. - beta2**(index_adam_call))
if((np.isnan(M[k].asnumpy())).any() or (np.isnan(R[k].asnumpy())).any()):
# print('GRRRRRR ', M, K)
raise(ValueError('Nans!!'))
# print('grrrrr: ', param.grad)
param[:] = param - learning_rate * m_k_hat / (nd.sqrt(r_k_hat) + eps)
# print('m_k_hat r_k_hat', m_k_hat, r_k_hat)
return params, M, R
def single_lstm_unit_calcs(X, c, Wxg, h, Whg, bg, Wxi, Whi, bi, Wxf, Whf, bf, Wxo, Who, bo):
g = nd.tanh(nd.dot(X, Wxg) + nd.dot(h, Whg) + bg)
i = nd.sigmoid(nd.dot(X, Wxi) + nd.dot(h, Whi) + bi)
f = nd.sigmoid(nd.dot(X, Wxf) + nd.dot(h, Whf) + bf)
o = nd.sigmoid(nd.dot(X, Wxo) + nd.dot(h, Who) + bo)
#######################
c = f * c + i * g
h = o * nd.tanh(c)
return c, h
def deep_lstm_rnn(inputs, h, c, temperature=1.0):
    """h: dict of nd.arrays, each key is the index of a hidden layer (from 1 to whatever).
    Index 0, if any, is the input layer.
    """
outputs = []
# inputs is one BATCH of sequences so its shape is number_of_seq, seq_length, features_dim
# (latter is 1 for a time series, vocab_size for a character, n for a n different times series)
for X in inputs:
# X is batch of one time stamp. E.g. if each batch has 37 sequences, then the first value of X will be a set of the 37 first values of each of the 37 sequences
# that means each iteration on X corresponds to one time stamp, but it is done in batches of different sequences
h[0] = X # the first hidden layer takes the input X as input
for i_layer in range(1, num_hidden_layers+1):
# lstm units now have the 2 following inputs:
# i) h_t from the previous layer (equivalent to the input X for a non-deep lstm net),
# ii) h_t-1 from the current layer (same as for non-deep lstm nets)
c[i_layer], h[i_layer] = single_lstm_unit_calcs(h[i_layer-1], c[i_layer], Wxg[i_layer], h[i_layer], Whg[i_layer], bg[i_layer], Wxi[i_layer], Whi[i_layer], bi[i_layer], Wxf[i_layer], Whf[i_layer], bf[i_layer], Wxo[i_layer], Who[i_layer], bo[i_layer])
yhat_linear = nd.dot(h[num_hidden_layers], Why) + by
# yhat is a batch of several values of the same time stamp
# this is basically the prediction of the sequence, which overlaps most of the input sequence, plus one point (character or value)
# yhat = softmax(yhat_linear, temperature=temperature)
# yhat = nd.sigmoid(yhat_linear)
# yhat = nd.tanh(yhat_linear)
yhat = yhat_linear # we cant use a 1.0-bounded activation function since amplitudes can be greater than 1.0
outputs.append(yhat) # outputs has same shape as inputs, i.e. a list of batches of data points.
# print('some shapes... yhat outputs', yhat.shape, len(outputs) )
return (outputs, h, c)
INDEX_TARGET_VALUE = 0
def test_prediction(one_input_seq, one_label_seq, temperature=1.0):
# WE ASSUME the first value in input vector is the variable of interest
#####################################
# Set the initial state of the hidden representation ($h_0$) to the zero vector
##################################### # some better initialization needed??
h, c = {}, {}
for i_layer in range(1, num_hidden_layers+1):
h[i_layer] = nd.zeros(shape=(batch_size_test, num_units_layers[i_layer]), ctx=ctx)
c[i_layer] = nd.zeros(shape=(batch_size_test, num_units_layers[i_layer]), ctx=ctx)
outputs, h, c = deep_lstm_rnn(one_input_seq, h, c, temperature=temperature)
return outputs[-1][0].asnumpy()[INDEX_TARGET_VALUE], one_label_seq.asnumpy()[-1].flatten()[INDEX_TARGET_VALUE], outputs, one_label_seq
def check_prediction(index):
if index >= len(test_data_inputs):
index = np.random.randint(len(test_data_inputs))
o, label, outputs, labels = test_prediction(test_data_inputs[index], test_data_labels[index], temperature=1.0)
prediction = round(o, 3)
true_label = round(label, 3)
outputs = [float(i.asnumpy().flatten()[INDEX_TARGET_VALUE]) for i in outputs] # if batch_size_test=1 then this float() will work, otherwise, nope.
true_labels = list(test_data_labels[index].asnumpy()[:,:,INDEX_TARGET_VALUE].flatten())
df = pd.DataFrame([outputs, true_labels]).transpose()
df.columns = ['predicted', 'true']
if true_label != 0:
rel_error = round(100. * (prediction / (true_label+1e-5) - 1.0), 2)
else:
rel_error = 100.
# print('\nprediction = {0} | actual_value = {1} | rel_error = {2}'.format(prediction, true_label, rel_error))
return df
epochs = 10000 # one epoch is one pass over the entire training set
moving_loss = 0.
learning_rate = 0.001 # 0.1 works for a [8, 8] after about 70 epochs of 32-sized batches
# Adam Optimizer stuff
beta1 = .9
beta2 = .999
index_adam_call = 0
# M & R arrays to keep track of momenta in adam optimizer. params is a list that contains all ndarrays of parameters
M = {k: nd.zeros_like(v) for k, v in enumerate(params)}
R = {k: nd.zeros_like(v) for k, v in enumerate(params)}
df_moving_loss = pd.DataFrame(columns=['Loss', 'Error'])
df_moving_loss.index.name = 'Epoch'
# needed to update plots on the fly
%matplotlib notebook
fig, axes_fig1 = plt.subplots(1,1, figsize=(6,3))
fig2, axes_fig2 = plt.subplots(1,1, figsize=(6,3))
for e in range(epochs):
############################
    # Attenuate the learning rate by a factor of 2 every 1000 epochs
############################
if ((e+1) % 1000 == 0):
learning_rate = learning_rate / 2.0 # TODO check if its ok to adjust learning_rate when using Adam Optimizer
h, c = {}, {}
for i_layer in range(1, num_hidden_layers+1):
h[i_layer] = nd.zeros(shape=(batch_size, num_units_layers[i_layer]), ctx=ctx)
c[i_layer] = nd.zeros(shape=(batch_size, num_units_layers[i_layer]), ctx=ctx)
for i in range(num_batches_train):
data_one_hot = train_data_inputs[i]
label_one_hot = train_data_labels[i]
with autograd.record():
outputs, h, c = deep_lstm_rnn(data_one_hot, h, c)
loss = average_rmse_loss(outputs, label_one_hot)
loss.backward()
# SGD(params, learning_rate)
index_adam_call += 1 # needed for bias correction in Adam optimizer
params, M, R = adam(params, learning_rate, M, R, index_adam_call, beta1, beta2, 1e-8)
##########################
# Keep a moving average of the losses
##########################
if (i == 0) and (e == 0):
moving_loss = nd.mean(loss).asscalar()
else:
moving_loss = .99 * moving_loss + .01 * nd.mean(loss).asscalar()
df_moving_loss.loc[e] = round(moving_loss, 4)
############################
# Predictions and plots
############################
data_prediction_df = check_prediction(index=e)
if not (e%50):
axes_fig1.clear()
data_prediction_df.plot(ax=axes_fig1)
fig.canvas.draw()
prediction = round(data_prediction_df.tail(1)['predicted'].values.flatten()[-1], 3)
true_label = round(data_prediction_df.tail(1)['true'].values.flatten()[-1], 3)
if true_label != 0:
rel_error = round(100. * np.abs(prediction / (true_label+1e-5) - 1.0), 2)
else:
rel_error = moving_rel_error
if not (e%50):
print("Epoch = {0} | Loss = {1} | Prediction = {2} True = {3} Error = {4}".format(e, moving_loss, prediction, true_label, rel_error ))
if not (e%50):
axes_fig2.clear()
if e == 0:
moving_rel_error = rel_error
else:
moving_rel_error = .99 * moving_rel_error + .01 * rel_error
df_moving_loss.loc[e, ['Error']] = moving_rel_error
if not (e%50):
axes_loss_plot = df_moving_loss.plot(ax=axes_fig2, secondary_y='Loss', color=['r','b'])
axes_loss_plot.right_ax.grid(False)
# axes_loss_plot.right_ax.set_yscale('log')
fig2.canvas.draw()
%matplotlib inline
# print(outputs[0].asnumpy()[0].flatten())
# print(test_data_labels[0].asnumpy()[:,:,0].flatten())
# [float(i.asnumpy().flatten()) for i in outputs]
# print([i.asnumpy() for i in outputs])
# one_label_seq = test_data_labels[0]
# print(outputs[-1][0])
# print(one_label_seq)
# print(rmse(outputs[-1][0], one_label_seq))
# print(test_data_inputs[0].asnumpy()[-1].flatten()[0])
# print([i.asnumpy().flatten() for i in outputs])
# print(float(outputs[0].asnumpy()[:, 0].flatten()))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Dataset
Step3: Check the data real quick
Step4: Preparing the data for training
Step5: Long short-term memory (LSTM) RNNs
Step6: Attach the gradients
Step7: Softmax Activation
Step8: Cross-entropy loss function
Step9: Averaging the loss over the sequence
Step10: Optimizer
Step12: Define the model
Step13: Test and visualize predictions
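For reference alongside Step10: the hand-rolled adam() function above implements the standard Adam update. With gradient $g_t$, step counter $t$ (index_adam_call in the code), and per-parameter moments $M$ and $R$,

$$m_t = \beta_1 m_{t-1} + (1-\beta_1)\,g_t, \qquad r_t = \beta_2 r_{t-1} + (1-\beta_2)\,g_t^2,$$
$$\hat{m}_t = \frac{m_t}{1-\beta_1^t}, \qquad \hat{r}_t = \frac{r_t}{1-\beta_2^t}, \qquad \theta_t = \theta_{t-1} - \frac{\alpha\,\hat{m}_t}{\sqrt{\hat{r}_t}+\epsilon},$$

where the bias-corrected terms match the m_k_hat and r_k_hat lines in the code.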
|
4,368
|
<ASSISTANT_TASK:>
Python Code:
import harness
from harness import Harness
from pandas import Categorical
from sklearn import datasets, discriminant_analysis
iris = datasets.load_iris()
# Harness is just a dataframe
df = Harness(
data=iris['data'], index=Categorical(iris['target']),
estimator=discriminant_analysis.LinearDiscriminantAnalysis(),
feature_level=-1, # the feature level indicates an index
# in the dataframe. -1 is the last index.
)
# Fit the model with 50 random rows.
df.sample(50).fit()
# Transform the dataframe
transformed = df.transform()
transformed.set_index(
df.index
.rename_categories(iris['target_names'])
.rename('species'), append=True, inplace=True,
)
# Plot the dataframe using Bokeh charts.
with transformed.reset_index().DataSource(x=0, y=1) as source:
source.Scatter(color='species')
source.show()
%%script bash --bg
python setup.py develop
watchmedo tricks tricks.yaml
# Execute this cell to stop watching the files
%killbgscripts
%%script bash
jupyter nbconvert harness/tests/*.ipynb --execute --to notebook --inplace
py.test
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: More Examples
Step2: Build & Run Tests
|
4,369
|
<ASSISTANT_TASK:>
Python Code:
# Import Pandas & NumPy
import pandas as pd
import numpy as np
# Create a tiny dataset, as a list of tuples
name = ('Oslo','Copenhaguen','Helsinki','Stockholm','Reykjavik')
pop = ( 647676, 583348, 626305, 917297, 121822 )
area = ( 480.76, 86.20, 715.49, 188.0, 273 )
data = [ (1000+i,n,p,s) for i, (n,p,s) in enumerate(zip(name,pop,area)) ]
# Create the dataframe from the list of tuples. We need to add the names of the columns, plus
# the column(s) we want to be used as row index
df = pd.DataFrame.from_records( data=data, columns=('id','name','population','area'), index=['id'] )
print df
# See the options we've got for data formatting
pd.describe_option('display')
df
# Check dataframe dimensions
print df.shape
# Check dataframe components
print df.index
print df.columns
df['name']
# Or also
df.name
df[ ['name','population'] ]
df.loc[:,['name','population']]
# One row, using the index. Note that in this case our row index is the 'id' column
df.loc[1000]
# Two rows
df.loc[1002:1003]
# Two rows, but only selected columns
df.loc[1002:1003,'name':'population']
# Get the first row
df.iloc[0]
# Get the last row
df.iloc[-1]
df[df.area<200]
df[ (df.area<200) & (df.population>600000) ]
# This variant returns the same size as the original dataframe, but fills only the rows that satisty the condition
df.where( df.area<200 )
df.sample(n=3)
# We create a new column by combining data from other columns
df.loc[:,'density'] = df.loc[:,'population']/df.loc[:,'area']
df.head()
df2 = df.assign( density2 = lambda x : x.population/x.area )
df2.head()
# Find the next id to insert
next_id = df.tail(1).index.values[0] + 1  # renamed from `next` to avoid shadowing the builtin
# Define new rows. This time, for a change, we'll be using a dict of lists as input data
name = ('Tallinn', 'Riga', 'Vilnius')
pop = ( 439286, 641007, 542664 )
size = ( 159.2, 304, 401 )
data2 = { 'id' : range(next_id, next_id+len(name)),
'name' : name,
'population' : pop,
'area' : size }
#data = [ {'id':next+i, 'name':n, 'population': p, size:'s' }
# for i, (n,p,s) in enumerate(zip(name,pop,size)) ]
# Create a dataframe from the dict of lists
df2 = pd.DataFrame( data2 )
# Set the column(s) to be used as the row index in this new dataframe
df2.set_index( 'id', inplace=True )
#df2 = pd.DataFrame.from_dict( data )
#df.append( data, ignore_index=True)
df2
# Now append this set of rows to the original one
df = df.append(df2)
df
# Find the rows having a missing density value. Obviously they will be the just added ones
missing = df[ np.isnan(df.density) ].index
df.loc[missing]
df.loc[missing].density = df.loc[missing].population/df.loc[missing].area
df.loc[missing]
df.loc[missing,'density'] = df.loc[missing,'population']/df.loc[missing,'area']
df.loc[missing].density
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1 Creating a DataFrame
Step2: Let's view the dataframe. We can print it
Step3: Or we can just show it, and it will be nicely formatted.
Step4: 2 Fetching columns
Step5: We can also get more than one column.
Step6: Same thing, but this time we get a reference to the original DataFrame by using a locator operator (see next section)
Step7: 3 Accessing DataFrame contents
Step8: 3.2 By position
Step9: 3.3 By boolean selection
Step10: 3.4 Random sample
Step11: 4 Augmenting a DataFrame
Step12: Another way of doing it is to use the assign() method. Ir returns a new DataFrame with the additions.
Step13: 4.2 Adding rows
Step14: Now let's add the missing densities. First naive attempt
Step15: It didn't work. Why? Because we are selecting in two steps
Step16: This time it works
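The failure in Step15 is the classic chained-assignment trap, and pandas can be asked to fail loudly instead of silently writing to a copy; a small sketch (reusing df and missing from above) follows.
import pandas as pd

# Optional global setting: raise instead of warn on chained assignment.
pd.set_option('mode.chained_assignment', 'raise')

# Safe pattern: one .loc call carrying both row and column selectors.
df.loc[missing, 'density'] = df.loc[missing, 'population'] / df.loc[missing, 'area']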
|
4,370
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
def find_peaks(a):
    """Find the indices of the local maxima in a sequence."""
    # YOUR CODE HERE
    # s is local now: with the original module-level s=[], repeated calls kept
    # appending to one shared list, which is why the asserts below were failing.
    s = []
    if a[0] > a[1]:  # the first number is bigger than the second: it's a peak
        s.append(0)
    for x in range(1, len(a) - 1):  # interior points
        if a[x] > a[x - 1] and a[x] > a[x + 1]:  # bigger than both neighbours
            s.append(x)
    if a[-1] > a[-2]:  # the last number is bigger than the second-to-last: a peak
        s.append(len(a) - 1)
    return s
p1 = find_peaks([2,0,1,0,2,0,1])
assert np.allclose(p1, np.array([0,2,4,6]))
p2 = find_peaks(np.array([0,1,2,3]))
assert np.allclose(p2, np.array([3]))
p3 = find_peaks([3,2,1,0])
assert np.allclose(p3, np.array([0]))
from sympy import pi, N
pi_digits_str = str(N(pi, 10001))[2:]
# YOUR CODE HERE
# num=[]
# pi_digits_str[0]
# for i in range(len(pi_digits_str)):
# num[i]=pi_digits_str[i]
f=plt.figure(figsize=(12,8))
plt.title("Histogram of Distances between Peaks in Pi")
plt.ylabel("Number of Occurences")
plt.xlabel("Distance from Previous Peak")
plt.tick_params(direction='out')
plt.box(True)
plt.grid(False)
test = np.array(list(pi_digits_str), dtype=int)  # one digit per element (np.int is a deprecated alias)
peaks=find_peaks(test)
dist=np.diff(peaks)
plt.hist(dist,bins=range(15));
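# Quick numeric summary of the peak spacings (sketch)
print("peaks found: %d | mean spacing: %.3f" % (len(peaks), dist.mean()))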
assert True # use this for grading the pi digits histogram
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Peak finding
Step3: Here is a string with the first 10000 digits of $\pi$ (after the decimal). Write code to perform the following
|
4,371
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
train = pd.read_csv("data/trainRF.csv")
test = pd.read_csv("data/testRF.csv")
train_13 = train.drop(['day','month', 'duration'], axis = 1)
test_13 = test.drop(['day','month', 'duration'], axis = 1)
from sklearn import preprocessing
categorical_variables =train_13.dtypes[train_13.dtypes=="object"].index.tolist()
for i in categorical_variables:
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train_13[i]))
train_13[i] = lbl.transform(train_13[i])
test_13[i] = lbl.transform(test_13[i])
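# Caveat (sketch): LabelEncoder.transform raises on categories unseen during fit,
# so fitting on the train values alone can break on test data. A safer variant
# fits the encoder on the union of both columns, e.g.:
# lbl.fit(pd.concat([train_13[i], test_13[i]]))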
train_13_data = train_13.ix[:,0:13]
train_13_target = train_13.iloc[:, -1]
test_13_data = test_13.ix[:,0:13]
test_13_target = test_13.iloc[:, -1]
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(train_13_data, train_13_target)
importances = clf.feature_importances_
# Importance of the features in the forest
importances
#Calculate the standard deviation of variable importance
std = np.std([tree.feature_importances_ for tree in clf.estimators_], axis=0)
std
indices = np.argsort(importances)[::-1]
indices
length = train_13_data.shape[1]
labels = []
for i in range(length):
labels.append(train_13_data.columns[indices[i]])
# Plot the feature importances of the forest
plt.figure(figsize=(16, 6))
plt.title("Feature importances")
plt.bar(range(length), importances[indices], yerr=std[indices], align="center")
plt.xticks(range(length), labels)
plt.xlim([-1, length])
plt.show()
import warnings
warnings.filterwarnings('ignore')
clf2 = RandomForestClassifier(warm_start=True, oob_score=True, max_features=None)
clf2.fit(train_13_data, train_13_target)
clf2.oob_score_
min_estimators = 10
max_estimators = 50
error_rate = []
for i in range(min_estimators, max_estimators + 1):
clf2.set_params(n_estimators=i)
clf2.fit(train_13_data, train_13_target)
oob_error = 1 - clf2.oob_score_
error_rate.append(oob_error)
error_rate_indice = [x for x in range(min_estimators, max_estimators + 1)]
plt.figure()
plt.plot(error_rate_indice, error_rate)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.show()
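# Sketch: read off the forest size with the lowest OOB error from the curve above
best_n = error_rate_indice[int(np.argmin(error_rate))]
print("n_estimators with lowest OOB error: %d" % best_n)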
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Implementing Random Forest
Step2: Key input parameters (in addition to decision trees)
Step3: Out-of-Bag Error
|
4,372
|
<ASSISTANT_TASK:>
Python Code:
import sys
import os
from itertools import count
from pathlib import Path
sys.path.insert(0, str(Path(os.environ['HOME'], 'git', 'skanb', 'pea-test-set')))
import utils as asvt_utils
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table, vstack
from astropy.time import Time
import tables
from scipy import stats
from scipy.interpolate import CubicSpline
from Chandra.Time import DateTime
from astropy.table import Table
from chandra_aca.star_probs import get_box_delta
%matplotlib inline
SKA = Path(os.environ['SKA'])
# Make a map of AGASC_ID to AGACS 1.7 MAG_ACA. The acq_stats.h5 file has whatever MAG_ACA
# was in place at the time of planning the loads.
with tables.open_file(str(SKA / 'data' / 'agasc' / 'miniagasc_1p7.h5'), 'r') as h5:
agasc_mag_aca = h5.root.data.col('MAG_ACA')
agasc_id = h5.root.data.col('AGASC_ID')
has_color3 = h5.root.data.col('RSV3') != 0
red_star = np.isclose(h5.root.data.col('COLOR1'), 1.5)
mag_aca_err = h5.root.data.col('MAG_ACA_ERR') / 100
red_mag_err = red_star & ~has_color3 # MAG_ACA, MAG_ACA_ERR is potentially inaccurate
agasc1p7_idx = {id: idx for id, idx in zip(agasc_id, count())}
agasc1p7 = Table([agasc_mag_aca, mag_aca_err, red_mag_err],
names=['mag_aca', 'mag_aca_err', 'red_mag_err'], copy=False)
acq_file = str(SKA / 'data' / 'acq_stats' / 'acq_stats.h5')
with tables.open_file(str(acq_file), 'r') as h5:
cols = h5.root.data.cols
names = {'tstart': 'guide_tstart',
'obsid': 'obsid',
'obc_id': 'acqid',
'halfwidth': 'halfw',
'warm_pix': 'n100_warm_frac',
'mag_aca': 'mag_aca',
'mag_obs': 'mean_trak_mag',
'known_bad': 'known_bad',
'color': 'color1',
'img_func': 'img_func',
'ion_rad': 'ion_rad',
'sat_pix': 'sat_pix',
'agasc_id': 'agasc_id',
't_ccd': 'ccd_temp',
'slot': 'slot'}
acqs = Table([getattr(cols, h5_name)[:] for h5_name in names.values()],
names=list(names.keys()))
year_q0 = 1999.0 + 31. / 365.25 # Jan 31 approximately
acqs['year'] = Time(acqs['tstart'], format='cxcsec').decimalyear.astype('f4')
acqs['quarter'] = (np.trunc((acqs['year'] - year_q0) * 4)).astype('f4')
# Create 'fail' column, rewriting history as if the OBC always
# ignore the MS flag in ID'ing acq stars.
#
# UPDATE: is ion_rad being ignored on-board? (Not as of 2018-11)
#
obc_id = acqs['obc_id']
obc_id_no_ms = (acqs['img_func'] == 'star') & ~acqs['sat_pix'] & ~acqs['ion_rad']
acqs['fail'] = np.where(obc_id | obc_id_no_ms, 0.0, 1.0)
acqs['mag_aca'] = [agasc1p7['mag_aca'][agasc1p7_idx[agasc_id]] for agasc_id in acqs['agasc_id']]
acqs['red_mag_err'] = [agasc1p7['red_mag_err'][agasc1p7_idx[agasc_id]] for agasc_id in acqs['agasc_id']]
acqs['mag_aca_err'] = [agasc1p7['mag_aca_err'][agasc1p7_idx[agasc_id]] for agasc_id in acqs['agasc_id']]
acqs['asvt'] = False
# Filter for year and mag (previously used data through 2007:001)
#
# UPDATE this to be between 4 and 5 years from the time of recalibration.
#
# The mag range was previously restricted to 8.5 < mag < 10.7 because the model
# is only calibrated in that range; here it is widened to 7.0 < mag < 11.0.
# Above 10.7 there is concern that stats are actually unreliable (the fraction
# of imposters that happen to be acquired is high?). This upper limit is
# something to play with.
#
year_min = 2014.5
year_max = DateTime('2018-10-30').frac_year
ok = ((acqs['year'] > year_min) & (acqs['year'] < year_max) &
(acqs['mag_aca'] > 7.0) & (acqs['mag_aca'] < 11) &
(~np.isclose(acqs['color'], 0.7)))
# Filter known bad obsids
print('Filtering known bad obsids, start len = {}'.format(np.count_nonzero(ok)))
bad_obsids = [
# Venus
2411,2414,6395,7306,7307,7308,7309,7311,7312,7313,7314,7315,7317,7318,7406,583,
7310,9741,9742,9743,9744,9745,9746,9747,9749,9752,9753,9748,7316,15292,16499,
16500,16501,16503,16504,16505,16506,16502,
]
for badid in bad_obsids:
ok = ok & (acqs['obsid'] != badid)
print('Filtering known bad obsids, end len = {}'.format(np.count_nonzero(ok)))
peas = Table.read('pea_analysis_results_2018_299_CCD_temp_performance.csv', format='ascii.csv')
peas = asvt_utils.flatten_pea_test_data(peas)
# Fuzz mag and T_ccd by a bit for plotting and fitting.
fpeas = Table([peas['star_mag'], peas['ccd_temp'], peas['search_box_hw']],
names=['mag_aca', 't_ccd', 'halfwidth'])
fpeas['year'] = np.random.uniform(2019.0, 2019.5, size=len(peas))
fpeas['color'] = 1.0
fpeas['quarter'] = (np.trunc((fpeas['year'] - year_q0) * 4)).astype('f4')
fpeas['fail'] = 1.0 - peas['search_success']
fpeas['asvt'] = True
fpeas['red_mag_err'] = False
fpeas['mag_obs'] = 0.0
data_all = vstack([acqs[ok]['year', 'fail', 'mag_aca', 't_ccd', 'halfwidth', 'quarter',
'color', 'asvt', 'red_mag_err', 'mag_obs'],
fpeas])
data_all.sort('year')
# Adjust probability (in probit space) for box size. See:
# https://github.com/sot/skanb/blob/master/pea-test-set/fit_box_size_acq_prob.ipynb
data_all['box_delta'] = get_box_delta(data_all['halfwidth'])
data_all = data_all.group_by('quarter')
data_all0 = data_all.copy() # For later augmentation with simulated red_mag_err stars
data_mean = data_all.groups.aggregate(np.mean)
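# Illustrative check of the box-size probit penalty applied above (the halfwidth
# grid here is arbitrary; get_box_delta comes from chandra_aca.star_probs):
hw_grid = np.array([60, 120, 180, 240])
for hw, bd in zip(hw_grid, get_box_delta(hw_grid)):
    print(f'halfw = {hw} arcsec -> box_delta = {bd:+.3f}')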
plt.figure(figsize=(13, 4))
ok = data_all['asvt'] == False
plt.subplot(1, 2, 1)
plt.hist(data_all['mag_aca'][ok], bins=50)
plt.title('Mag_aca distribution')
plt.grid()
plt.subplot(1, 2, 2)
plt.hist(data_all['t_ccd'][ok] + np.random.uniform(-0.3, 0.3, np.count_nonzero(ok)), bins=24)
plt.grid()
plt.title('ACA CCD temperature distribution');
def p_fail_binned(pars,
t_ccd,
box_delta=0):
    """Acquisition probability model for binned data (no mag dependence).

    :param pars: 3 parameters (offset p0; p1 and p2 set the probit slope vs. T_ccd)
    :param t_ccd: CCD temperature (degC)
    :param box_delta: probit delta for the search box half width (arcsec)
    """
p0, p1, p2 = pars
# Make sure box_delta has right dimensions
t_ccd, box_delta = np.broadcast_arrays(t_ccd, box_delta)
tc = (t_ccd + 16) / 10
tcx = np.linspace(-0.1, 2.0, 40)
tcy = np.cumsum(stats.norm.cdf(p1 + p2 * tcx))
probit_p_fail = p0 + np.interp(tc, tcx, tcy) + box_delta
p_fail = stats.norm.cdf(probit_p_fail) # transform from probit to linear probability
return p_fail
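# Quick sanity check with illustrative (not fitted) parameter values. By
# construction p_fail_binned is monotonic in T_ccd: the probit term is a
# cumulative sum of CDF values, which is non-decreasing.
print(p_fail_binned([-2.5, -1.5, 0.5], t_ccd=np.array([-15.0, -10.0, -5.0])))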
def p_acq_fail(data=None):
    """Sherpa fit function wrapper to ensure proper use of data in fitting."""
if data is None:
data = data_all
def sherpa_func(pars, x=None):
return p_fail_binned(pars, data['t_ccd'], data['box_delta'])
return sherpa_func
def fit_poly_model(data):
from sherpa import ui
comp_names = ['p0', 'p1', 'p2']
data_id = 1
ui.set_method('simplex')
ui.set_stat('cash')
ui.load_user_model(p_acq_fail(data), 'model')
ui.add_user_pars('model', comp_names)
ui.set_model(data_id, 'model')
    ui.load_arrays(data_id, np.array(data['year']), np.array(data['fail'], dtype=float))
# Initial fit values from fit of all data
fmod = ui.get_model_component('model')
for comp_name in comp_names:
setattr(fmod, comp_name, 0.0)
comp = getattr(fmod, comp_name)
comp.max = 10
fmod.p0.min = -10
# fmod.p1.min = 0
# fmod.p2.min = 0
# fmod.p2.max = 0.5
ui.fit(data_id)
return ui.get_fit_results()
def plot_fails_mag_aca_vs_t_ccd(mag_bins, year0=None):
if year0 is None:
year0 = data_all['year'].min()
ok = (data_all['year'] >= year0) & ~data_all['fail'].astype(bool)
da = data_all[ok]
fuzzx = np.random.uniform(-0.3, 0.3, len(da))
fuzzy = np.random.uniform(-0.125, 0.125, len(da))
plt.plot(da['t_ccd'] + fuzzx, da['mag_aca'] + fuzzy, '.C0', markersize=4)
ok = (data_all['year'] > year0) & data_all['fail'].astype(bool)
da = data_all[ok]
fuzzx = np.random.uniform(-0.3, 0.3, len(da))
fuzzy = np.random.uniform(-0.125, 0.125, len(da))
plt.plot(da['t_ccd'] + fuzzx, da['mag_aca'] + fuzzy, '.C1', markersize=4, alpha=0.8)
# plt.xlim(-18, -10)
# plt.ylim(7.0, 11.1)
x0, x1 = plt.xlim()
for y in mag_bins:
plt.plot([x0, x1], [y, y], '-', color='r', linewidth=2, alpha=0.8)
plt.xlabel('T_ccd (C)')
plt.ylabel('Mag_aca')
plt.title(f'Acq successes (blue) and failures (orange) since {year0:.1f}')
plt.grid()
def plot_fit_grouped(pars, group_col, group_bin, mask=None, log=False, colors='br', label=None, probit=False):
data = data_all if mask is None else data_all[mask]
data['model'] = p_acq_fail(data)(pars)
group = np.trunc(data[group_col] / group_bin)
data = data.group_by(group)
data_mean = data.groups.aggregate(np.mean)
len_groups = np.diff(data.groups.indices)
data_fail = data_mean['fail']
model_fail = np.array(data_mean['model'])
fail_sigmas = np.sqrt(data_fail * len_groups) / len_groups
# Possibly plot the data and model probabilities in probit space
if probit:
dp = stats.norm.ppf(np.clip(data_fail + fail_sigmas, 1e-6, 1-1e-6))
dm = stats.norm.ppf(np.clip(data_fail - fail_sigmas, 1e-6, 1-1e-6))
data_fail = stats.norm.ppf(data_fail)
model_fail = stats.norm.ppf(model_fail)
fail_sigmas = np.vstack([data_fail - dm, dp - data_fail])
plt.errorbar(data_mean[group_col], data_fail, yerr=fail_sigmas,
fmt='.' + colors[1:], label=label, markersize=8)
plt.plot(data_mean[group_col], model_fail, '-' + colors[0])
if log:
ax = plt.gca()
ax.set_yscale('log')
def plot_fit_all(parvals, mask=None, probit=False):
if mask is None:
mask = np.ones(len(data_all), dtype=bool)
mt = mag_filter(8.5, 10.8) & mask
plt.figure(figsize=(12, 4))
for probit in True, False:
plt.subplot(1, 2, int(probit) + 1)
for v0, v1, colors in ((-11, -10, 'br'),
(-12, -11, 'gk'),
(-13, -12, 'cm'),
(-14, -13, 'br'),
(-15, -14, 'gk')):
plot_fit_grouped(parvals, 'mag_aca', 0.25, t_ccd_filter(v0, v1) & mt,
colors=colors, label=f'{v0} < t_ccd < {v1}', probit=probit)
plt.legend(loc='upper left')
plt.ylim(-3, 3) if probit else plt.ylim(-0.1, 1.1)
plt.ylabel('p_fail')
plt.xlabel('year')
plt.tight_layout()
plt.grid()
mt = t_ccd_filter(-16, -2) & mask
plt.figure(figsize=(12, 4))
for probit in True, False:
plt.subplot(1, 2, int(probit) + 1)
for v0, v1, colors in ((10.3, 10.7, 'gk'),
(10, 10.3, 'cm'),
(9.5, 10, 'br'),
(9, 9.5, 'gk')):
plot_fit_grouped(parvals, 'year', 0.25, mag_filter(v0, v1) & mt,
colors=colors, label=f'{v0} < mag < {v1}', probit=probit)
plt.legend(loc='upper left')
plt.ylim(-3, 3) if probit else plt.ylim(-0.1, 1.1)
plt.ylabel('p_fail')
plt.xlabel('mag_aca')
plt.tight_layout()
plt.grid()
mt = t_ccd_filter(-16, -2) & mask
plt.figure(figsize=(12, 4))
for probit in True, False:
plt.subplot(1, 2, int(probit) + 1)
for v0, v1, colors in ((10.3, 10.7, 'gk'),
(10, 10.3, 'cm'),
(9.5, 10, 'br'),
(9, 9.5, 'gk')):
plot_fit_grouped(parvals, 't_ccd', 0.5, mag_filter(v0, v1) & mt,
colors=colors, label=f'{v0} < mag < {v1}', probit=probit)
plt.legend(loc='upper left')
plt.ylim(-3, 3) if probit else plt.ylim(-0.1, 1.1)
plt.xlabel('t_ccd')
plt.ylabel('p_fail')
plt.tight_layout()
plt.grid()
def mag_filter(mag0, mag1):
ok = (data_all['mag_aca'] > mag0) & (data_all['mag_aca'] < mag1)
return ok
def t_ccd_filter(t_ccd0, t_ccd1):
ok = (data_all['t_ccd'] > t_ccd0) & (data_all['t_ccd'] < t_ccd1)
return ok
def wp_filter(wp0, wp1):
ok = (data_all['warm_pix'] > wp0) & (data_all['warm_pix'] < wp1)
return ok
mag_centers = np.array([6.3, 8.1, 9.1, 9.55, 9.75, 10.0, 10.25, 10.55, 10.75, 11.0])
mag_bins = (mag_centers[1:] + mag_centers[:-1]) / 2
mag_means = np.array([8.0, 9.0, 9.5, 9.75, 10.0, 10.25, 10.5, 10.75])
for m0, m1, mm in zip(mag_bins[:-1], mag_bins[1:], mag_means):
ok = (data_all['asvt'] == False) & (data_all['mag_aca'] >= m0) & (data_all['mag_aca'] < m1)
print(f"m0={m0:.2f} m1={m1:.2f} mean_mag={data_all['mag_aca'][ok].mean():.2f} vs. {mm}")
plot_fails_mag_aca_vs_t_ccd(mag_bins)
mask_no_1p5 = ((data_all['red_mag_err'] == False) &
(data_all['t_ccd'] > -16))
mag0s, mag1s = mag_bins[:-1], mag_bins[1:]
fits = {}
for m0, m1 in zip(mag0s, mag1s):
print(m0, m1)
fits[m0, m1] = fit_poly_model(data_all[mask_no_1p5 & mag_filter(m0, m1)])
colors = [f'kC{i}' for i in range(9)]
plt.figure(figsize=(13, 4))
for subplot in (1, 2):
plt.subplot(1, 2, subplot)
probit = (subplot == 2)
for m0_m1, color, mag_mean in zip(list(fits), colors, mag_means):
fit = fits[m0_m1]
m0, m1 = m0_m1
plot_fit_grouped(fit.parvals, 't_ccd', 1.0,
mask=mask_no_1p5 & mag_filter(m0, m1),
probit=probit, colors=color, label=f'{mag_mean}')
plt.grid()
plt.ylabel(('Probit ' if probit else '') + 'p_fail')
plt.xlabel('T_ccd')
if probit:
plt.ylim(-3.5, 3.5)
plt.legend(fontsize='small', loc='upper left');
# This computes probabilities for 120 arcsec boxes, corresponding to raw data
t_ccds = np.linspace(-16, -0, 20)
plt.figure(figsize=(13, 4))
for subplot in (1, 2):
plt.subplot(1, 2, subplot)
for m0_m1, color, mag_mean in zip(list(fits), colors, mag_means):
fit = fits[m0_m1]
m0, m1 = m0_m1
probs = p_fail_binned(fit.parvals, t_ccds)
if subplot == 2:
probs = stats.norm.ppf(probs)
plt.plot(t_ccds, probs, label=f'{mag_mean}')
plt.legend()
plt.xlabel('T_ccd')
plt.ylabel('P_fail' if subplot == 1 else 'Probit(p_fail)')
plt.grid()
p0s = []
p1s = []
p2s = []
for fit in fits.values():
ps = fit.parvals
p0s.append(ps[0])
p1s.append(ps[1])
p2s.append(ps[2])
plt.plot(mag_means, p0s, '.-', label='p0')
plt.plot(mag_means, p1s, '.-', label='p1')
plt.plot(mag_means, p2s, '.-', label='p2')
plt.legend(fontsize='small')
plt.grid()
def print_pvals(ps, idx):
vals_str = ', '.join(f'{p:.3f}' for p in ps)
print(f'p{idx}s = np.array([{vals_str}])')
print_pvals(p0s, 0)
print_pvals(p1s, 1)
print_pvals(p2s, 2)
def calc_p_fail(t_ccds, mags, halfws=120, probit=False):
    """Acquisition probability model.

    :param t_ccds: CCD temperature(s) (degC)
    :param mags: star mag(s)
    :param halfws: search box half width(s) in arcsec (default 120)
    :param probit: if True, return probit(p_fail) instead of p_fail
    """
mag_means = np.array([8.0, 9.0, 9.5, 9.75, 10.0, 10.25, 10.5, 10.75])
p0s = np.array([-2.801, -2.555, -1.956, -1.917, -1.971, -2.082, -1.919, -0.569])
p1s = np.array([-3.150, -2.104, -2.459, -1.942, -1.495, -1.041, -0.764, -0.932])
p2s = np.array([1.375, 0.573, 1.187, 0.933, 0.506, 0.066, -0.464, -0.722])
# Make sure box_delta has right dimensions
t_ccds, mags, halfws = np.broadcast_arrays(t_ccds, mags, halfws)
tcs = (t_ccds + 16) / 10
box_deltas = get_box_delta(halfws)
tcx = np.linspace(-0.1, 2.0, 40)
p_fails = []
for tc, mag, box_delta in zip(tcs.flat, mags.flat, box_deltas.flat):
p0 = np.interp(mag, mag_means, p0s)
p1 = np.interp(mag, mag_means, p1s)
p2 = np.interp(mag, mag_means, p2s)
tcy = np.cumsum(stats.norm.cdf(p1 + p2 * tcx))
p_fail = p0 + np.interp(tc, tcx, tcy) + box_delta
if not probit:
p_fail = stats.norm.cdf(p_fail) # transform from probit to linear probability
p_fails.append(p_fail)
return np.array(p_fails).reshape(t_ccds.shape)
mags, t_ccds = np.mgrid[8.75:10.75:30j, -16:-4:30j]
p_fails = calc_p_fail(t_ccds, mags, probit=False)
cs = plt.contour(t_ccds, mags, p_fails, levels=[0.05, 0.2, 0.5, 0.75, 0.9], colors=['g', 'b', 'c', 'm', 'r'])
plt.clabel(cs, inline=1, fontsize=10)
plt.grid()
plt.xlim(-17, -4)
plt.ylim(8.5, 11.0)
plt.xlabel('T_ccd (degC)')
plt.ylabel('Mag_ACA')
plt.title('Failure probability');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get acq stats data and clean
Step2: Get ASVT data and make it look more like acq stats data
Step3: Combine flight acqs and ASVT data
Step4: Compute box probit delta term based on box size
Step7: Model definition
Step8: Plotting and validation
Step9: Define magnitude bins for fitting and show data
Step11: Color != 1.5 fit (this is MOST acq stars)
|
4,373
|
<ASSISTANT_TASK:>
Python Code:
import phoebe
b = phoebe.default_binary()
b.add_dataset('lc', dataset='lc01')
print(b.filter(qualifier='pblum*', dataset='lc01'))
print(b.get_parameter('pblum_mode'))
print(b.get_parameter('pblum_component'))
b.set_value('pblum_component', 'secondary')
print(b.filter(qualifier='pblum*', dataset='lc01'))
b.set_value('pblum_mode', 'decoupled')
print(b.filter(qualifier='pblum*', dataset='lc01'))
print(b.compute_pblums())
b.add_dataset('mesh', dataset='mesh01')
print(b.get_parameter('columns', dataset='mesh01').choices)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the default mode, you can change which of the stars you'd like to provide the luminosity. By default, this is the primary component. To provide the luminosity of the secondary star instead, set pblum_component.
Step2: Previously, you could 'decouple' the luminosities by setting pblum_ref of both components to 'self'. In PHOEBE 2.2+, you will instead change pblum_mode to 'decoupled', in which case multiple pblum parameters will become visible.
Step3: For more information on the behavior for all of these supported modes, see the pblum tutorial.
Step4: This also means that the mesh column to expose luminosities is renamed to pblum_ext (and abs_pblum_ext) as these expose the extrinsic luminosities (including features such as spots, irradiation, etc).
|
4,374
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from pymc3 import *
import numpy as np
import matplotlib.pyplot as plt
size = 200
true_intercept = 1
true_slope = 2
x = np.linspace(0, 1, size)
# y = a + b*x
true_regression_line = true_intercept + true_slope * x
# add noise
y = true_regression_line + np.random.normal(scale=.5, size=size)
data = dict(x=x, y=y)
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111, xlabel='x', ylabel='y', title='Generated data and underlying model')
ax.plot(x, y, 'x', label='sampled data')
ax.plot(x, true_regression_line, label='true regression line', lw=2.)
plt.legend(loc=0);
with Model() as model: # model specifications in PyMC3 are wrapped in a with-statement
# Define priors
sigma = HalfCauchy('sigma', beta=10, testval=1.)
intercept = Normal('Intercept', 0, sd=20)
x_coeff = Normal('x', 0, sd=20)
# Define likelihood
likelihood = Normal('y', mu=intercept + x_coeff * x,
sd=sigma, observed=y)
# Inference!
trace = sample(3000, njobs=2) # draw 3000 posterior samples using NUTS sampling
with Model() as model:
# specify glm and pass in data. The resulting linear model, its likelihood and
# and all its parameters are automatically added to our model.
glm.GLM.from_formula('y ~ x', data)
trace = sample(3000, njobs=2) # draw 3000 posterior samples using NUTS sampling
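# Posterior summary table (sketch; summary() is star-imported from pymc3 above
# and complements the traceplot shown next)
summary(trace[100:])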
plt.figure(figsize=(7, 7))
traceplot(trace[100:])
plt.tight_layout();
plt.figure(figsize=(7, 7))
plt.plot(x, y, 'x', label='data')
plot_posterior_predictive_glm(trace, samples=100,
label='posterior predictive regression lines')
plt.plot(x, true_regression_line, label='true regression line', lw=3., c='y')
plt.title('Posterior predictive regression lines')
plt.legend(loc=0)
plt.xlabel('x')
plt.ylabel('y');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generating data
Step2: Estimating the model
Step3: This should be fairly readable for people who know probabilistic programming. However, would my non-statistician friend know what all this does? Moreover, recall that this is an extremely simple model that would be one line in R. Having multiple, potentially transformed regressors, interaction terms or link-functions would also make this much more complex and error prone.
Step4: Much shorter, but this code does the exact same thing as the above model specification (you can change priors and everything else too if we wanted). glm() parses the Patsy model string, adds random variables for each regressor (Intercept and slope x in this case), adds a likelihood (by default, a Normal is chosen), and all other variables (sigma). Finally, glm() then initializes the parameters to a good starting point by estimating a frequentist linear model using statsmodels.
Step5: The left side shows our marginal posterior -- for each parameter value on the x-axis we get a probability on the y-axis that tells us how likely that parameter value is.
|
4,375
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
fngs_ts = np.load('/home/eric/cmp/fngs/outputs/ts_roi/pp264-2mm/sub-0025864_ses-1_bold_pp264-2mm.npy')
cpac_ts = np.load('/home/eric/cmp/cpac/pipeline_HCPtest/sub-0025864_ses-1/roi_timeseries/_scan_rest_1_rest/_csf_threshold_0.96/_gm_threshold_0.7/_wm_threshold_0.96/_compcor_ncomponents_5_selector_pc10.linear1.wm0.global0.motion1.quadratic1.gm0.compcor1.csf0/_mask_pp264-2mm/roi_pp264-2mm.npz')['roi_data']
fig = plt.figure()
tr = 2
ax = fig.add_subplot(111)
# visualize FNGS timeseries
ax.plot(np.arange(0, fngs_ts.shape[1]*tr, tr), np.transpose(fngs_ts), alpha=0.6)
ax.set_ylabel('Intensity')
ax.set_xlabel('Time (s)')
ax.set_title('FNGS Timeseries')
fig.show()
fig = plt.figure()
ax = fig.add_subplot(111)
# visualize CPAC timeseries
ax.plot(np.arange(0, cpac_ts.shape[1]*tr, tr), cpac_ts.transpose(), alpha=0.7)
ax.set_ylabel('Normalized Intensity')
ax.set_xlabel('Time (s)')
ax.set_ylim([-200, 200])
ax.set_title('CPAC Timeseries')
fig.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.imshow(np.abs(np.corrcoef(fngs_ts)))
ax.set_xlabel('ROI')
ax.set_ylabel('ROI')
ax.set_title('FNGS Correlation Matrix')
cbar = fig.colorbar(cax)
fig.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.imshow(np.abs(np.corrcoef(cpac_ts)))
ax.set_xlabel('ROI')
ax.set_ylabel('ROI')
ax.set_title('CPAC Correlation Matrix')
cbar = fig.colorbar(cax)
fig.show()
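# Sketch: a single-number agreement score between the two pipelines, computed as
# the correlation of the vectorized upper triangles (both use the same pp264-2mm
# parcellation, so the matrices have matching shapes).
fngs_c = np.corrcoef(fngs_ts)
cpac_c = np.corrcoef(cpac_ts)
iu = np.triu_indices_from(fngs_c, k=1)
print("upper-triangle agreement: %.3f" % np.corrcoef(fngs_c[iu], cpac_c[iu])[0, 1])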
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Things to Note
|
4,376
|
<ASSISTANT_TASK:>
Python Code:
import sys
import os
import inspect
# add the path to opengrid to sys.path
script_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(os.path.join(script_dir, os.pardir, os.pardir))
from opengrid.library import houseprint
from opengrid.library import config
c=config.Config()
sys.path.append(c.get('tmpo', 'folder'))
import tmpo
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import pytz
bxl=pytz.timezone('Europe/Brussels')
%matplotlib inline
plt.rcParams['figure.figsize'] = 14,8
s = tmpo.Session()
s.debug = True
s.add('d209e2bbb35b82b83cc0de5e8b84a4ff','e16d9c9543572906a11649d92f902226')
s.sync()
ts = s.series('d209e2bbb35b82b83cc0de5e8b84a4ff')
print ts
ts.plot()
plt.show()
ts.index = pd.to_datetime((ts.index.values*1e9).astype(int))
ts.plot()
plt.show()
tsmin = ts.resample(rule='60s')
tsmin=tsmin.interpolate(method='linear')
tsmin=tsmin.diff()*3600/60
tsmin.plot()
tsmin.ix['20141016':'20141018'].plot()
ts.name
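# Generic helper (sketch, using the same legacy resample API as above): turn a
# cumulative Wh counter into average power in W at a given resolution.
def counter_to_power(ts_cum, rule='60s'):
    seconds = int(rule.rstrip('s'))
    cum = ts_cum.resample(rule=rule).interpolate(method='linear')
    return cum.diff() * 3600.0 / seconds  # Wh per interval -> average W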
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a tmpo session, and enter debug mode to get more output.
Step2: Add a sensor and token to start tracking the data for this given sensor. You only have to do this once for each sensor.
Step3: Sync all available data to your hard drive. All sensors previously added will be synced.
Step4: Now you can create a pandas timeseries with all data from a given sensor.
Step5: When plotting the data, you'll notice that this ts contains cumulative data, and the time axis (= pandas index) contains seconds since the epoch. Not very practical.
Step6: So the first thing to do is create an DatetimeIndex.
Step7: To show differential data (eg instantaneous power), we first have to resample this cumulative data to the interval we want to obtain. We use linear interpolation to approximate the cumulative value between two datapoints. In the example below, we resample to minute values. Then, we take the difference between the cumulative values at minute intervals in order to get the average power (per minute). As the original data is in Wh, we have to convert it to W.
Step8: If we want to plot only a specific period, we can slice the data with the .ix[from
|
4,377
|
<ASSISTANT_TASK:>
Python Code:
from pylab import * # numpy, matplotlib, plt
from bregman.suite import * # Bregman audio feature extraction library
from soundscapeecology import * # 2D time-frequency shift-invariant convolutive matrix factorization
%matplotlib inline
rcParams['figure.figsize'] = (15.0, 9.0)
sound_path = 'sounds'
sounds = os.listdir(sound_path)
print "sounds:", sounds
N=4096; H=N/4
x,sr,fmt = wavread(os.path.join(sound_path,sounds[0]))
print "sample_rate:", sr, "(Hz), fft size:", (1000*N)/sr, "(ms), hop size:", (1000*H)/sr, "(ms)"
# 1. Instantiate a new SoundscapeEcololgy object using the spectral analysis parameters defined above
S = SoundscapeEcology(nfft=N, wfft=N/2, nhop=H)
# Inspect the contents of this object
print S.__dict__
# 2. load_audio() - sample segments of the soundfile without replacement, to speed up analysis
# The computational complexity of the analysis is high, and the information in a soundscape is largely redundant
# So, draw 25 random segments in time order, each consisting of 20 STFT frames (~500ms) of audio data
S.load_audio(os.path.join(sound_path,sounds[0]), num_samples=25, frames_per_sample=20) # num_samples=None means analyze the whole sound file
# 3. analyze() into shift-invariant kernels
# The STFT spectrum will be converted to a constant-Q transform by averaging over logarithmically spaced bins
# The shift-invariant kernels will have shift and time-extent dimensions
# The default kernel shape yields 1-octave of shift (self.feature_params['nbpo']),
# and its duration is frames_per_sample. Here, the num_components and win parameters are illustrated.
S.analyze(num_components=7, win=(S.feature_params['nbpo'], S.frames_per_sample))
# 4. visualize() - visualize the spectrum reconstruction and the individual components
# inputs:
# plotXi - visualize individual reconstructed component spectra [True]
# plotX - visualize original (pre-analysis) spectrum and reconstruction [False]
# plotW - visualize component time-frequency kernels [False]
# plotH - visualize component shift-time activation functions [False]
# **pargs - plotting key word arguments [**self.plt_args]
S.visualize(plotX=True, plotXi=True, plotW=True, plotH=True)
# 5. resynthesize() - sonify the results
# First, listen to the original (inverse STFT) and the full component reconstruction (inverse CQFT with random phases)
x_orig = S.F.inverse(S.X)
x_recon = S.F.inverse(S.X_hat, Phi_hat=(np.random.rand(*S.F.STFT.shape)*2-1)*np.pi) # random phase reconstruction
play(balance_signal(x_orig))
play(balance_signal(x_recon))
# First, listen to the original (inverse CQFT with original phases in STFT reconstruction)
# and the all-components reconstruction (inverse CQFT with random phases)
# Second, listen to the individual component reconstructions
# Use the notebook's "interrupt kernel" button (stop button) if this is too long (n_comps x audio sequence)
# See above plots for the individual component spectrograms
for k in range(S.n_components):
x_hat = S.resynthesize(k) # resynthesize individual component
play(balance_signal(x_hat)) # play it back
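# Optional sketch: write each component to disk instead of (or after) playing it.
# scipy's wavfile.write(filename, rate, data) is used here; balance_signal is
# assumed to return a roughly unit-normalized float signal.
from scipy.io import wavfile
for k in range(S.n_components):
    x_hat = balance_signal(S.resynthesize(k))
    wavfile.write('component_%d.wav' % k, int(sr), x_hat.astype(np.float32))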
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2>Example audio</h2>
Step2: <h2>Spectrum Analysis Parameters</h2>
Step3: <h2>SoundscapeEcology Toolkit</h2>
|
4,378
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
from IPython.display import HTML
from ipywidgets import interact
HTML('../style/code_toggle.html')
def FS_coeffs(x, m, func, T=2.0*np.pi):
    """Computes Fourier series (FS) coefficients of func.

    Input:
        x    = input vector at which to evaluate func
        m    = the order of the coefficient
        func = the function to find the FS of
        T    = the period of func (defaults to 2 pi)
    """
# Evaluate the integrand
am_int = func(x)*np.exp(-1j*2.0*m*np.pi*x/T)
# Use trapezoidal integration to get the coefficient
am = np.trapz(am_int,x)
return am/T
def FS_sum(x, m, func, period=None):
# If no period is specified use entire domain
if period is None:
period = np.abs(x.max() - x.min())
# Evaluate the coefficients and sum the series
f_F = np.zeros(x.size, dtype=np.complex128)
    for i in range(-m, m+1):
am = FS_coeffs(x, i, func, T=period)
f_F += am*np.exp(2.0j*np.pi*i*x/period)
return f_F
# define square wave function
def square_wave(x):
I = np.argwhere(np.abs(x) <= 0.5)
tmp = np.zeros(x.size)
tmp[I] = 1.0
return tmp
# Set domain and compute square wave
N = 250
x = np.linspace(-1.0,1.0,N)
# Compute the FS up to order m
m = 10
sw_F = FS_sum(x, m, square_wave, period=2.0)
# Plot result
plt.figure(figsize=(15,5))
plt.plot(x, sw_F.real, 'g', label=r'$ Fourier \ series $')
plt.plot(x, square_wave(x), 'b', label=r'$ Square \ wave $')
plt.title(r"$FS \ decomp \ of \ square \ wave$",fontsize=20)
plt.xlabel(r'$x$',fontsize=18)
plt.ylim(-0.05,1.5)
plt.legend()
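# The overshoot near the discontinuity (the Gibbs phenomenon) does not vanish as
# the order grows; it tends to roughly 9% of the jump. A quick numeric check
# (sketch; keep m well below the N=250 sampling limit so the trapezoidal
# coefficients stay accurate):
for order in (10, 20, 40):
    overshoot = FS_sum(x, order, square_wave, period=2.0).real.max() - 1.0
    print("m = %3d : overshoot = %.4f" % (order, overshoot))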
def inter_FS(x,m,func,T):
f_F = FS_sum(x, m, func, period=T)
plt.plot(x,f_F.real,'b')
plt.plot(x,func(x),'g')
interact(lambda m, T: inter_FS(x=np.linspace(-1.0, 1.0, N), m=m, func=square_wave, T=T),
         m=(5, 100, 1), T=(0.5, 2*np.pi, 0.5)) and None  # T starts at 0.5 to avoid division by zero
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import section specific modules
Step3: 2.3. Fourier Series
Step4: That should be good enough for our purposes here. Next we create a function to sum the Fourier series.
Step5: Let's see what happens if we decompose a square wave.
Step6: Figure 2.8.1
|
4,379
|
<ASSISTANT_TASK:>
Python Code:
from dx import *
import time
from pylab import plt
plt.style.use('seaborn')
%matplotlib inline
# constant short rate
r = constant_short_rate('r', 0.02)
# market environments
me_gbm = market_environment('gbm', dt.datetime(2015, 1, 1))
# geometric Brownian motion
me_gbm.add_constant('initial_value', 100.)
me_gbm.add_constant('volatility', 0.2)
me_gbm.add_constant('currency', 'EUR')
me_gbm.add_constant('model', 'gbm')
# valuation environment
val_env = market_environment('val_env', dt.datetime(2015, 1, 1))
val_env.add_constant('paths', 25000)
val_env.add_constant('frequency', 'M')
val_env.add_curve('discount_curve', r)
val_env.add_constant('starting_date', dt.datetime(2015, 1, 1))
val_env.add_constant('final_date', dt.datetime(2015, 12, 31))
# add valuation environment to market environments
me_gbm.add_environment(val_env)
risk_factors = {'gbm' : me_gbm}
gbm = geometric_brownian_motion('gbm_obj', me_gbm)
me_put = market_environment('put', dt.datetime(2015, 1, 1))
me_put.add_constant('maturity', dt.datetime(2015, 12, 31))
me_put.add_constant('strike', 40.)
me_put.add_constant('currency', 'EUR')
me_put.add_environment(val_env)
am_put = valuation_mcs_american_single(
'am_put', mar_env=me_put, underlying=gbm,
payoff_func='np.maximum(strike - instrument_values, 0)')
positions = {}
strikes = np.linspace(80, 120, 100)
for i, strike in enumerate(strikes):
positions[i] = derivatives_position(
name='am_put_pos_%s' % strike,
quantity=1,
underlyings=['gbm'],
mar_env=me_put,
otype='American single',
payoff_func='np.maximum(%5.3f - instrument_values, 0)' % strike)
port_sequ = derivatives_portfolio(
name='portfolio',
positions=positions,
val_env=val_env,
risk_factors=risk_factors,
correlations=None,
parallel=False) # sequential calculation
t0 = time.time()
ress = port_sequ.get_values()
ts = time.time() - t0
print('Time in sec %.2f' % ts)
ress['strike'] = strikes
ress.set_index('strike')['value'].plot(figsize=(10, 6))
plt.ylabel('option value estimates')
port_para = derivatives_portfolio(
'portfolio',
positions,
val_env,
risk_factors,
correlations=None,
parallel=True) # parallel valuation
t0 = time.time()
resp = port_para.get_values()
# parallel valuation with as many cores as available
tp = time.time() - t0
print('Time in sec %.2f' % tp)
plt.figure(figsize=(10, 6))
plt.plot(strikes, resp['value'].values, 'r.', label='parallel')
plt.plot(strikes, ress['value'].values, 'b', label='sequential')
plt.legend(loc=0)
plt.ylabel('option value estimates')
ts / tp
# speed-up factor
# of course hardware-dependent
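# Sketch: side-by-side timing summary of the two runs above
print('sequential: %6.2f s | parallel: %6.2f s | speed-up: %.2fx' % (ts, tp, ts / tp))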
wi = 0.4
plt.figure(figsize=(10, 6))
plt.bar((1.5 - wi/2, 2.5 - wi/2), (ts/ts, tp/ts), width=wi)
plt.xticks((1.5, 2.5), ('sequential', 'parallel'))
plt.ylim(0, 1.1), plt.xlim(0.75, 3.25)
plt.ylabel('relative performance (lower = better)')
plt.title('DX Analytics Portfolio Valuation')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Single Risk Factor
Step2: American Put Option
Step3: Large Portfolio
Step4: Sequential Valuation
Step5: The call of the get_values method to value all instruments ...
Step6: ... and the results visualized.
Step7: Parallel Valuation
Step8: The call of the get_values method for the parall valuation case.
Step9: Again, the results visualized (and compared to the sequential results).
Step10: Speed-up
|
4,380
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'hammoz-consortium', 'sandbox-2', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
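# Example of a completed property (illustrative values only; every real entry
# below must be filled in by the model documenters):
# DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# DOC.set_value('ECHAM6.3-HAM2.3 (illustrative)')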
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
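# --- Illustrative sketch only (not part of the generated template): a completed
# property pairs the set_id call above with DOC.set_value choices drawn from the
# listed valid options, e.g. (hypothetical answer):
#     DOC.set_value("stratospheric aerosols optical thickness")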
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
4,381
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install statsmodels
import numpy as np
import pandas as pd
from IPython.display import clear_output
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
import tensorflow as tf
tf.random.set_seed(123)
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return fc.indicator_column(
fc.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(fc.numeric_column(feature_name,
dtype=tf.float32))
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((X.to_dict(orient='list'), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
# For training, cycle thru dataset as many times as needed (n_epochs=None).
dataset = (dataset
.repeat(n_epochs)
.batch(NUM_EXAMPLES))
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
params = {
'n_trees': 50,
'max_depth': 3,
'n_batches_per_layer': 1,
# You must enable center_bias = True to get DFCs. This will force the model to
# make an initial prediction before using any features (e.g. use the mean of
# the training labels for regression or log odds for classification when
# using cross entropy loss).
'center_bias': True
}
est = tf.estimator.BoostedTreesClassifier(feature_columns, **params)
# Train model.
est.train(train_input_fn, max_steps=100)
# Evaluation.
results = est.evaluate(eval_input_fn)
clear_output()
pd.Series(results).to_frame()
in_memory_params = dict(params)
in_memory_params['n_batches_per_layer'] = 1
# In-memory input_fn does not use batching.
def make_inmemory_train_input_fn(X, y):
y = np.expand_dims(y, axis=1)
def input_fn():
return dict(X), y
return input_fn
train_input_fn = make_inmemory_train_input_fn(dftrain, y_train)
# Train the model.
est = tf.estimator.BoostedTreesClassifier(
feature_columns,
train_in_memory=True,
**in_memory_params)
est.train(train_input_fn)
print(est.evaluate(eval_input_fn))
import matplotlib.pyplot as plt
import seaborn as sns
sns_colors = sns.color_palette('colorblind')
pred_dicts = list(est.experimental_predict_with_explanations(eval_input_fn))
# Create DFC Pandas dataframe.
labels = y_eval.values
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
df_dfc = pd.DataFrame([pred['dfc'] for pred in pred_dicts])
df_dfc.describe().T
# Sum of DFCs + bias == probability.
bias = pred_dicts[0]['bias']
dfc_prob = df_dfc.sum(axis=1) + bias
np.testing.assert_almost_equal(dfc_prob.values,
probs.values)
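# In other words, for each example i the prediction decomposes exactly as
#   P(survived)_i = bias + sum_j DFC_{i,j}
# which is what the assertion above checks numerically.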
# Boilerplate code for plotting :)
def _get_color(value):
"""To make positive DFCs plot green, negative DFCs plot red."""
green, red = sns.color_palette()[2:4]
if value >= 0: return green
return red
def _add_feature_values(feature_values, ax):
"""Display feature's values on left of plot."""
x_coord = ax.get_xlim()[0]
OFFSET = 0.15
for y_coord, (feat_name, feat_val) in enumerate(feature_values.items()):
t = plt.text(x_coord, y_coord - OFFSET, '{}'.format(feat_val), size=12)
t.set_bbox(dict(facecolor='white', alpha=0.5))
from matplotlib.font_manager import FontProperties
font = FontProperties()
font.set_weight('bold')
t = plt.text(x_coord, y_coord + 1 - OFFSET, 'feature\nvalue',
fontproperties=font, size=12)
def plot_example(example):
TOP_N = 8 # View top 8 features.
sorted_ix = example.abs().sort_values()[-TOP_N:].index # Sort by magnitude.
example = example[sorted_ix]
colors = example.map(_get_color).tolist()
ax = example.to_frame().plot(kind='barh',
color=colors,
legend=None,
alpha=0.75,
figsize=(10,6))
ax.grid(False, axis='y')
ax.set_yticklabels(ax.get_yticklabels(), size=14)
# Add feature values.
_add_feature_values(dfeval.iloc[ID][sorted_ix], ax)
return ax
# Plot results.
ID = 182
example = df_dfc.iloc[ID] # Choose ith example from evaluation set.
TOP_N = 8 # View top 8 features.
sorted_ix = example.abs().sort_values()[-TOP_N:].index
ax = plot_example(example)
ax.set_title('Feature contributions for example {}\n pred: {:1.2f}; label: {}'.format(ID, probs[ID], labels[ID]))
ax.set_xlabel('Contribution to predicted probability', size=14)
plt.show()
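# Hedged aside (not in the original tutorial): third-party explainers such as
# LIME or shap give similar per-example attributions. A minimal sketch, assuming
# the `shap` package and an sklearn-style tree model (shap's TreeExplainer does
# not accept TF estimators directly):
#   import shap
#   from sklearn.ensemble import RandomForestClassifier
#   rf = RandomForestClassifier().fit(pd.get_dummies(dftrain), y_train)
#   shap_values = shap.TreeExplainer(rf).shap_values(pd.get_dummies(dfeval))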
# Boilerplate plotting code.
def dist_violin_plot(df_dfc, ID):
# Initialize plot.
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
# Create example dataframe.
TOP_N = 8 # View top 8 features.
example = df_dfc.iloc[ID]
ix = example.abs().sort_values()[-TOP_N:].index
example = example[ix]
example_df = example.to_frame(name='dfc')
# Add contributions of entire distribution.
parts=ax.violinplot([df_dfc[w] for w in ix],
vert=False,
showextrema=False,
widths=0.7,
positions=np.arange(len(ix)))
face_color = sns_colors[0]
alpha = 0.15
for pc in parts['bodies']:
pc.set_facecolor(face_color)
pc.set_alpha(alpha)
# Add feature values.
_add_feature_values(dfeval.iloc[ID][ix], ax)  # use the local ix, not the global sorted_ix
# Add local contributions.
ax.scatter(example,
np.arange(example.shape[0]),
color=sns.color_palette()[2],
s=100,
marker="s",
label='contributions for example')
# Legend
# Proxy plot, to show violinplot dist on legend.
ax.plot([0,0], [1,1], label='eval set contributions\ndistributions',
color=face_color, alpha=alpha, linewidth=10)
legend = ax.legend(loc='lower right', shadow=True, fontsize='x-large',
frameon=True)
legend.get_frame().set_facecolor('white')
# Format plot.
ax.set_yticks(np.arange(example.shape[0]))
ax.set_yticklabels(example.index)
ax.grid(False, axis='y')
ax.set_xlabel('Contribution to predicted probability', size=14)
dist_violin_plot(df_dfc, ID)
plt.title('Feature contributions for example {}\n pred: {:1.2f}; label: {}'.format(ID, probs[ID], labels[ID]))
plt.show()
importances = est.experimental_feature_importances(normalize=True)
df_imp = pd.Series(importances)
# Visualize importances.
N = 8
ax = (df_imp.iloc[0:N][::-1]
.plot(kind='barh',
color=sns_colors[0],
title='Gain feature importances',
figsize=(10, 6)))
ax.grid(False, axis='y')
# Plot.
dfc_mean = df_dfc.abs().mean()
N = 8
sorted_ix = dfc_mean.abs().sort_values()[-N:].index # Average and sort by absolute.
ax = dfc_mean[sorted_ix].plot(kind='barh',
color=sns_colors[1],
title='Mean |directional feature contributions|',
figsize=(10, 6))
ax.grid(False, axis='y')
FEATURE = 'fare'
feature = pd.Series(df_dfc[FEATURE].values, index=dfeval[FEATURE].values).sort_index()
ax = sns.regplot(feature.index.values, feature.values, lowess=True)
ax.set_ylabel('contribution')
ax.set_xlabel(FEATURE)
ax.set_xlim(0, 100)
plt.show()
def permutation_importances(est, X_eval, y_eval, metric, features):
"""Column by column, shuffle values and observe effect on eval set.
source: http://explained.ai/rf-importance/index.html
A similar approach can be done during training. See "Drop-column importance"
in the above article.
"""
baseline = metric(est, X_eval, y_eval)
imp = []
for col in features:
save = X_eval[col].copy()
X_eval[col] = np.random.permutation(X_eval[col])
m = metric(est, X_eval, y_eval)
X_eval[col] = save
imp.append(baseline - m)
return np.array(imp)
def accuracy_metric(est, X, y):
"""TensorFlow estimator accuracy."""
eval_input_fn = make_input_fn(X,
y=y,
shuffle=False,
n_epochs=1)
return est.evaluate(input_fn=eval_input_fn)['accuracy']
features = CATEGORICAL_COLUMNS + NUMERIC_COLUMNS
importances = permutation_importances(est, dfeval, y_eval, accuracy_metric,
features)
df_imp = pd.Series(importances, index=features)
sorted_ix = df_imp.abs().sort_values().index
ax = df_imp[sorted_ix][-5:].plot(kind='barh', color=sns_colors[2], figsize=(10, 6))
ax.grid(False, axis='y')
ax.set_title('Permutation feature importance')
plt.show()
from numpy.random import uniform, seed
from scipy.interpolate import griddata
# Create fake data
seed(0)
npts = 5000
x = uniform(-2, 2, npts)
y = uniform(-2, 2, npts)
z = x*np.exp(-x**2 - y**2)
xy = np.zeros((2,np.size(x)))
xy[0] = x
xy[1] = y
xy = xy.T
# Prep data for training.
df = pd.DataFrame({'x': x, 'y': y, 'z': z})
xi = np.linspace(-2.0, 2.0, 200)
yi = np.linspace(-2.1, 2.1, 210)
xi,yi = np.meshgrid(xi, yi)
df_predict = pd.DataFrame({
'x' : xi.flatten(),
'y' : yi.flatten(),
})
predict_shape = xi.shape
def plot_contour(x, y, z, **kwargs):
# Grid the data.
plt.figure(figsize=(10, 8))
# Contour the gridded data, plotting dots at the nonuniform data points.
CS = plt.contour(x, y, z, 15, linewidths=0.5, colors='k')
CS = plt.contourf(x, y, z, 15,
vmax=abs(z).max(), vmin=-abs(z).max(), cmap='RdBu_r')  # scale colors to the z argument, not the global zi
plt.colorbar() # Draw colorbar.
# Plot data points.
plt.xlim(-2, 2)
plt.ylim(-2, 2)
zi = griddata(xy, z, (xi, yi), method='linear', fill_value=0)
plot_contour(xi, yi, zi)
plt.scatter(df.x, df.y, marker='.')
plt.title('Contour on training data')
plt.show()
fc = [tf.feature_column.numeric_column('x'),
tf.feature_column.numeric_column('y')]
def predict(est):
"""Predictions from a given estimator."""
predict_input_fn = lambda: tf.data.Dataset.from_tensors(dict(df_predict))
preds = np.array([p['predictions'][0] for p in est.predict(predict_input_fn)])
return preds.reshape(predict_shape)
train_input_fn = make_input_fn(df, df.z)
est = tf.estimator.LinearRegressor(fc)
est.train(train_input_fn, max_steps=500);
plot_contour(xi, yi, predict(est))
n_trees = 37 #@param {type: "slider", min: 1, max: 80, step: 1}
est = tf.estimator.BoostedTreesRegressor(fc, n_batches_per_layer=1, n_trees=n_trees)
est.train(train_input_fn, max_steps=500)
clear_output()
plot_contour(xi, yi, predict(est))
plt.text(-1.8, 2.1, '# trees: {}'.format(n_trees), color='w', backgroundcolor='black', size=20)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Gradient boosted trees
Step2: For a description of the features, see the previous tutorial.
Step3: Build the input pipeline
Step4: Train the model
Step5: For performance reasons, when your data fits in memory, we recommend using the train_in_memory=True argument of the tf.estimator.BoostedTreesClassifier function. However, if training time is not a concern, or if you have a very large dataset and want to do distributed training, use the tf.estimator.BoostedTrees API shown above.
Step6: Model interpretation and plotting
Step7: Local interpretability
Step8: A convenient property of DFCs is that the sum of the contributions plus the bias equals the prediction for a given example.
Step11: Plot the DFCs for an individual passenger. Let's color-code the plot by the directionality of the contributions and add the feature values to the figure.
Step12: Larger-magnitude contributions have a larger impact on the model's prediction. Negative contributions indicate that the feature value for this example reduced the model's prediction, while positive values indicate an increase in the prediction.
Step13: Plot this example.
Step14: Finally, third-party tools such as LIME and shap can also make it easier to understand individual predictions of a model.
Step15: Mean absolute DFCs
Step16: You can also see how DFCs vary as a feature value changes.
Step19: Permutation feature importance
Step20: Visualize the model's fit
Step22: You can visualize the function. Redder colors correspond to larger function values.
Step23: First, let's try fitting a linear model to the data.
Step24: It is not a very good fit. Next, let's fit a GBDT model to understand how the model fits the function.
|
4,382
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import numpy as np
np.random.seed(10)
a = tf.constant(np.random.rand(50, 100, 512))
def g(a):
return tf.expand_dims(a, 2)
result = g(a.__copy__())
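# result.shape == (50, 100, 1, 512): tf.expand_dims inserts a new length-1 axis
# at position 2 of the (50, 100, 512) input.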
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
4,383
|
<ASSISTANT_TASK:>
Python Code:
# (Assumed imports for this Python 2 / cobrapy-era notebook -- the original
# import cell is not shown in this excerpt.)
import math
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import cobra
from cobra.flux_analysis import single_gene_deletion, sample
print cobra.__version__
model_iEK = cobra.io.load_json_model("iEK1020.json")
print "# of reactions", len(model_iEK.reactions)
print "# of metabolites", len(model_iEK.metabolites)
print "# of genes", len(model_iEK.genes)
# for react in model_iEK_before.reactions:
# if react not in model_iEK.reactions:
# print react, react.reaction, react.gene_reaction_rule
model_iEK.id
model_iEK.medium
model_iEK.optimize().f
# print len(cobra.manipulation.validate.check_mass_balance(model_iEK).keys())
mass_bal_metabs, mass_bal_reacts = [], []
for react_id, mass_bal in cobra.manipulation.validate.check_mass_balance(model_iEK).iteritems():
# ------ Don't count exchange reactions and biomass functions -----
if str(react_id)[:3]!="EX_" and "biomass" not in str(react_id):
rxn_metabs = [str(x.id) for x in model_iEK.reactions.get_by_id(react_id.id).metabolites]
mass_bal_metabs.extend(rxn_metabs)
mass_bal_reacts.append(str(react_id))
# print str(react_id), mass_bal, react_id.reaction, react_id.gene_reaction_rule
print len(mass_bal_reacts)
print mass_bal_reacts
# Number of reactions not mass balanced. 4 of these have artificial metabolites (e.g. 0.0001 cobalamin) and
# thus cannot be mass balanced - perhaps these artificial instances should be removed.
print len([x for x in mass_bal_reacts if "FAD_" not in x])
blocked_reacts = cobra.flux_analysis.variability.find_blocked_reactions(model_iEK,
reaction_list=None,
zero_cutoff=1e-09,
open_exchanges=True)
print len(blocked_reacts)
blocked_reacts
griffin_file = "../../Dataframes/Table_S2.xlsx"
griffin_excel = pd.read_excel(griffin_file, sheetname='supp table 2',skiprows = 9,keep_default_na=False)
griffin_excel.head()
def griffin_essen(model_tb, dic_return):
model = model_tb.copy()
fal_pos_dic, fal_neg_dic = {}, {}
true_neg_dic, true_pos_dic = {}, {}
growth_rates = single_gene_deletion(model)
print "Optimal growth", model.optimize().f
print model.summary()
orig_growth_thres = 0.25*model.optimize().f
print "Threshold growth", orig_growth_thres
true_pos, true_neg, fal_pos, fal_neg = 0, 0, 0, 0
# Set the Griffin essentiality threshold -- the iSM810 paper uses 0.1 as "confident essential"
grif_thres = 0.1
print model.reactions.get_by_id("Kt3r").reaction
for index, row in griffin_excel.iterrows():
gene = str(row["Locus"])
try:
growth = growth_rates.loc[gene, "flux"]
# True negative - predicts that it grows (not essential) and is correct.
if float(row["p value"]) > grif_thres and growth > orig_growth_thres:
true_neg = true_neg + 1
true_neg_dic.update({gene: [growth, float(row["p value"])]})
# False negative - predicts that it grows (not essential) when it is actually essential
if float(row["p value"]) < grif_thres and growth > orig_growth_thres:
fal_neg = fal_neg + 1
fal_neg_dic.update({gene: [growth, float(row["p value"])]})
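# True positive - predicts essential (no growth) and the gene is indeed essential (low Griffin p-value)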
if float(row["p value"]) < grif_thres and growth < orig_growth_thres:
true_pos = true_pos + 1
true_pos_dic.update({gene: [growth, float(row["p value"])]})
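# False positive - predicts essential (no growth) when the gene is actually non-essential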
if float(row["p value"]) > grif_thres and growth < orig_growth_thres:
fal_pos = fal_pos + 1
fal_pos_dic.update({gene: [growth, float(row["p value"])]})
except:
pass
# ---Analyze and Print results ---
print "TP - TN - FP - FN"
print true_pos, true_neg, fal_pos, fal_neg
# percent of correct predictions
perc_correct = (true_pos+true_neg)/(true_pos+true_neg+fal_pos+float(fal_neg))
print "percent correct: ", perc_correct
# mcc calculation
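# MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)); it ranges from
# -1 (total disagreement) to +1 (perfect prediction), with 0 for random guessing.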
MCC_root = math.sqrt((true_pos + fal_pos)*(true_pos + fal_neg)*(true_neg + fal_pos)*(true_neg + fal_neg))
MCC = (true_pos*true_neg - fal_pos*fal_neg)/MCC_root
print "Matthew Correlation Coefficient", MCC
if dic_return == "Yes":
return fal_neg_dic, fal_pos_dic
elif dic_return == "Yes both":
return fal_neg_dic, fal_pos_dic, true_neg_dic, true_pos_dic
griffin_media = {
"EX_h": 1000, # hydrogen
"EX_h2o": 1000, # water
"EX_o2": 20.0, # oxygen
"EX_asn_L": 1.0, # asparagine
"EX_nh4": 10.0, # ammonium
"EX_cit":5.0, # citrate
"EX_etoh": 5.0, # ethanol
"EX_ca2": 1000.0, # calcium for CaCl2
"EX_cl": 1000.0, # chloride for CaCl2
"EX_mg2": 1000.0, # mg for MgSO4
"EX_so4": 1000, # so4 for MgSO4
"EX_fe3":5.0, # fe3 for ferric
"EX_glyc":5.0, # glycerol
"EX_pi":1.0, # phosphate
"EX_chsterol":5.0, # cholesterol
}
model_iEK.medium = griffin_media
FN_dic, FP_dic, TN_dic, TP_dic = griffin_essen(model_iEK, "Yes both")
%matplotlib inline
matplotlib.style.use('seaborn-darkgrid')
mcc = (0.27, 0.51, 0.52, 0.52, 0.53)
N = 5
ind = np.arange(N) # the x locations for the groups
width = .45
fig, ax = plt.subplots(figsize=(8, 5))
colors = ['yellowgreen','yellowgreen','yellowgreen','yellowgreen','lightskyblue']
rects1 = ax.bar(ind, mcc, width,align='center', color=colors)
# ---- Add some text for labels, title and axes ticks
ax.set_ylabel('Matthews Correlation Coefficient')
ax.set_xlabel('Metabolic Model')
ax.set_title('Gene Essentiality Prediction Comparisons')
ax.set_xticks(ind)
ax.set_xticklabels(('iNJ661', 'GSMN-TB1.1', 'sMtb', 'iSM810', 'iEK1020'))
# ----- Add values on top of barchart
rects = ax.patches
# Now make some labels
# labels = ["label%d" % i for i in xrange(len(rects))]
labels = mcc
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height+0.01, label, ha='center',
va='bottom',fontsize=13)
fig.savefig("../../Figures/gene_essentiality_comparisons.svg", bbox_inches='tight')
inVitro_drugTesting = {
'EX_asn_L': 1,
'EX_ca2': 1000.0,
'EX_cit': 1.0,
'EX_cl': 1000.0,
'EX_etoh': 1,
'EX_fe3': 5.0,
'EX_glyc': 1.0,
'EX_h': 1000,
'EX_mg2': 1000.0,
'EX_mobd': 1000,
'EX_nh4': 20.0,
'EX_o2': 20.0,
'EX_pi': 1000,
'EX_so4': 1000,
}
inVivo_conditions = {
'EX_ala_L': 1,
'EX_asn_L': 5,
'EX_asp_L': 5,
'EX_urea': .1,
'EX_glu_L': 1,
'EX_gln_L': 1,
'EX_ca2': 1000,
'EX_cl': 1000,
'EX_co2': 1000,
'EX_cobalt2': 1000,
'EX_cu2': 1000,
'EX_fe3': 5,
'EX_h': 1000,
'EX_hdca': 10,
'EX_k': 1000,
'EX_mg2': 1000,
'EX_mobd': 1000,
'EX_na1': 1000,
'EX_no3': 1.5,
'EX_o2': .5, # - hypoxia
'EX_pi': 1000,
'EX_ppa': 20,
'EX_so4': 1000,
'EX_ocdca': 10,
'EX_ttdca': 10,
"EX_nodcoa": 10,
'EX_chsterol': 10,
"EX_octscoa": 10,
}
model_iEK.medium = inVitro_drugTesting
solution = model_iEK.optimize()
pfba_norm_solution = cobra.flux_analysis.pfba(model_iEK)
print "Optimal growth: ", pfba_norm_solution.fluxes["biomass"]
print "ICL FBA flux:", solution.fluxes["ICL"]
print "ICL pFBA flux:", pfba_norm_solution.fluxes["ICL"]
print "Total model flux:", pfba_norm_solution.fluxes.sum()
fva_norm = cobra.flux_analysis.flux_variability_analysis(model_iEK,
fraction_of_optimum=.95,
solver=None)
fva_norm = pd.DataFrame.from_dict(fva_norm).T.round(5)
print "----"
print model_iEK.summary()
print "----"
print "Max ICL flux variability:", fva_norm["ICL"]["maximum"]
model_iEK.medium = inVivo_conditions
solution = model_iEK.optimize()
pfba_hypox_solution = cobra.flux_analysis.pfba(model_iEK)
print "Optimal growth: ", pfba_hypox_solution.fluxes["biomass"]
print "ICL FBA flux:", solution.fluxes["ICL"]
print "ICL pFBA flux:", pfba_hypox_solution.fluxes["ICL"]
print "Total model flux:", pfba_hypox_solution.fluxes.sum()
fva_hypoxia = cobra.flux_analysis.flux_variability_analysis(model_iEK,
fraction_of_optimum=.95,
solver=None)
fva_hypox = pd.DataFrame.from_dict(fva_hypoxia).T.round(5)
print "----"
print model_iEK.summary()
print "----"
print "Max ICL flux variability:", fva_hypox["ICL"]["maximum"]
reacts_of_interest = ['EX_succ', 'EX_ac', 'EX_nh4', 'ICL', '2MCS', 'ICDHy','KGD2', 'ENO', 'PEPCK_re', 'ATPS4r']
for react in reacts_of_interest:
print react
print "pFBA:", pfba_norm_solution.fluxes[react] ,pfba_hypox_solution.fluxes[react]
print "FVA max:", fva_norm[react]["maximum"], fva_hypox[react]["maximum"]
print "FVA min:", fva_norm[react]["minimum"], fva_hypox[react]["minimum"]
print "-----"
model_iEK.medium = inVitro_drugTesting
pfba_solution = cobra.flux_analysis.pfba(model_iEK)
opt_value = pfba_solution["biomass"]
# -- constrain solution space to 0.95 of optimum --
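# (cobrapy models act as context managers: changes made inside `with model_iEK:`
# are reverted on exit, so the growth-rate constraint below is temporary)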
with model_iEK:
print model_iEK.reactions.get_by_id("biomass").lower_bound
model_iEK.reactions.get_by_id("biomass").lower_bound = opt_value*.95
print model_iEK.reactions.get_by_id("biomass").lower_bound
%time inVitro_samples = sample(model_iEK, 10000, processes=4)
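# sample() draws flux vectors from the constrained solution space (cobrapy's
# default OptGP sampler, run here with 4 parallel chains), so the histograms
# below compare feasible flux distributions rather than single optimal values.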
print inVitro_samples.head()
model_iEK.medium = inVivo_conditions
pfba_solution = cobra.flux_analysis.pfba(model_iEK)
opt_value = pfba_solution["biomass"]
# -- constrain solution space to 0.95 of optimum --
with model_iEK:
print model_iEK.reactions.get_by_id("biomass").lower_bound
model_iEK.reactions.get_by_id("biomass").lower_bound = opt_value*.95
print model_iEK.reactions.get_by_id("biomass").lower_bound
%time inVivo_samples = sample(model_iEK, 10000, processes=4)
print inVivo_samples.head()
%matplotlib inline
plt.style.use('seaborn-white')
for react in reacts_of_interest:
inVitro_samples[react].hist(bins=80)
inVivo_samples[react].hist(bins=80)
plt.title(react)
plt.ylabel("sample frequency")
plt.xlabel("flux")
plt.show()
def plot_box_plot(samples1, samples2, name1, name2, rxn, title):
box_1 = samples1[rxn].copy()
box_1.name = name1
box_2 = samples2[rxn].copy()
box_2.name = name2
box_df = pd.concat([box_1, box_2], axis=1)
color = dict(boxes='k', whiskers='k', medians='r', caps='Gray')
box_df.plot.box(sym='', title=title, color=color)
plt.ylabel("Flux")
# save_fig_name = rxn+"_sampling_boxplot.svg"
# plt.savefig(save_fig_name)
for react in reacts_of_interest:
plot_box_plot(inVitro_samples, inVivo_samples,
"in vitro media", "in vivo media",
react,
react)
model_iEK = cobra.io.load_json_model("iEK1020.json")
print model_iEK.medium
react_dict = []
for react in model_iEK.reactions:
react_dict.append({'Reaction ID': str(react.id),
'Reaction Name': str(react.name),
'Subsystem': str(react.subsystem),
'Reaction Formula': str(react.reaction),
'Lower Bound': str(react.lower_bound),
'Upper Bound': str(react.upper_bound),
'Gene Reaction Rule': str(react.gene_reaction_rule)
})
react_df = pd.DataFrame(react_dict)
react_df = react_df[["Reaction ID", 'Reaction Name', 'Subsystem', 'Reaction Formula',
'Lower Bound', 'Upper Bound', 'Gene Reaction Rule' ]]
metab_dict = []
for metab in model_iEK.metabolites:
metab_dict.append({'Metabolite ID': str(metab.id),
'Metabolite Name': str(metab.name),
'Metabolite Formula': str(metab.formula),
})
metab_df = pd.DataFrame(metab_dict)
metab_df = metab_df[["Metabolite ID", 'Metabolite Name', 'Metabolite Formula']]
model_iEK.id
gene_essential_dic = []
for gene in model_iEK.genes:
g = str(gene)
if g in FN_dic.keys():
ko_growth_rate = FN_dic[g][0]
griff_p_val = FN_dic[g][1]
error_type = "False Negative"
if g in FP_dic.keys():
ko_growth_rate = FP_dic[g][0]
griff_p_val = FP_dic[g][1]
error_type = "False Positive"
if g in TN_dic.keys():
ko_growth_rate = TN_dic[g][0]
griff_p_val = TN_dic[g][1]
error_type = "True Negative"
if g in TP_dic.keys():
ko_growth_rate = TP_dic[g][0]
griff_p_val = TP_dic[g][1]
error_type = "True Positive"
gene_essential_dic.append({"Gene": g,
"KO Growth Rate": ko_growth_rate,
"Griffin Essentiality P-Value": griff_p_val,
"Error Type": error_type
})
gene_essen_df = pd.DataFrame(gene_essential_dic)
gene_essen_df = gene_essen_df[["Gene", "KO Growth Rate", "Griffin Essentiality P-Value", "Error Type"]]
gene_essen_df.head()
media_conditions = [model_iEK.medium, griffin_media, inVitro_drugTesting, inVivo_conditions]
media_df = pd.DataFrame(media_conditions)
media_df = media_df.transpose()
media_df.columns = ["Middlebrook m7H10 (Acetate C-source)",
"Griffin Media (Gene Essentiality)",
"Lowenstein-Jensen Media (Drug-Testing)",
"Physiological Media (in vivo modeling)"]
media_df.head()
writer = pd.ExcelWriter('iEK1020_supplementary.xlsx')
react_df.to_excel(writer, sheet_name='Reactions', index=False)
metab_df.to_excel(writer, sheet_name='Metabolites', index=False)
gene_essen_df.to_excel(writer, sheet_name="Gene Essentiality", index=False)
media_df.to_excel(writer, sheet_name="Media Conditions", index=False)
writer.save()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What you need to run this notebook...
Step2: Check default media condition
Step3: Check FBA solution
Step4: Check Mass Balance
Step5: Most of these are in the two FAD pathways, which are independent of the rest of the model. Furthermore, these reactions contain metabolites that lack names, IDs, and formulas (I just need to spend a couple of hours adding formulas to the metabolites). Setting these aside, only 9 reactions are unbalanced in the model.
Step6: Look at Blocked Reactions
Step7: Gene Essentiality
Step8: Change media
Step9: Calculate the Matthews correlation coefficient. It should be around 0.532.
Step10: In vitro vs. in vivo simulations
Step11: Compute FVA for in vitro model
Step12: Compute FVA for in vivo model
Step13: Compare model simulations
Step14: Compare sampled spaces
Step15: in vivo conditions
Step16: Comparison Plots
Step17: Box plots
Step18: Output to excel sheet.
Step19: Prepare Griffin Essentiality sheet.
Step20: Prepare media conditions as JSON files.
Step21: Write everything to excel file.
|
4,384
|
<ASSISTANT_TASK:>
Python Code:
import os
from datetime import datetime
import numpy as np
import pandas as pd
import sklearn as sk
from sklearn.externals import joblib
from sklearn.preprocessing import LabelEncoder
from sklearn.grid_search import GridSearchCV
import xgboost as xgb
def check_NaN_Values_in_df(df):
# searching for NaN values is all the columns
for col in df:
nan_count = df[col].isnull().sum()
if nan_count != 0:
print (col + " => "+ str(nan_count) + " NaN Values")
df_all = pd.read_csv("output/enriched.csv", index_col=0)
# Check for NaN Values => We must find: country_destination => 62096 NaN Values
check_NaN_Values_in_df(df_all)
df_all.sample(n=5) # Only display a few lines and not the whole dataframe
# Loading training csv file
df_train = pd.read_csv("data/train_users_2.csv")
df_train.head(n=5) # Only display a few lines and not the whole dataframe
# Prepare training data for modelling
df_train.set_index('id', inplace=True)
df_train = pd.concat([df_train['country_destination'], df_all], axis=1, join='inner')
id_train = df_train.index.values
labels = df_train['country_destination']
le = LabelEncoder()
y = le.fit_transform(labels)
X = df_train.drop('country_destination', axis=1, inplace=False)
# Checking Up
print (y)
X.sample(n=5)
# Grid Search - Used to find best combination of parameters
XGB_model = xgb.XGBClassifier(objective='multi:softprob', subsample=0.5, colsample_bytree=0.5, seed=0)
param_grid = {'max_depth': [3, 4, 5], 'learning_rate': [0.1, 0.3], 'n_estimators': [25, 50]}
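# 3 depths x 2 learning rates x 2 tree counts = 12 parameter combinations;
# with cv=3 below, GridSearchCV fits 12 * 3 = 36 models, then refits the best
# combination on the full training set (refit=True).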
model = GridSearchCV(
estimator=XGB_model,
param_grid=param_grid,
scoring='accuracy',
verbose=10,
n_jobs=1,
iid=True,
refit=True, cv=3
)
model.fit(X, y)
print("Best score: %0.3f" % model.best_score_)
print("Best parameters set:")
best_parameters = model.best_estimator_.get_params()
for param_name in sorted(param_grid.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
# We create the output directory if necessary
if not os.path.exists("models"):
os.makedirs("models")
# save model to file
joblib.dump(model, 'models/0001.model')
df_test = pd.read_csv("data/test_users.csv")
df_test.head(n=5) # Only display a few lines and not the whole dataframe
# Prepare test data for prediction
df_test.set_index('id', inplace=True)
df_test = pd.merge(df_test.loc[:,['date_first_booking']], df_all, how='left', left_index=True, right_index=True, sort=False)
X_test = df_test.drop('date_first_booking', axis=1, inplace=False)
X_test = X_test.fillna(-1)
id_test = df_test.index.values
# Make predictions
y_pred = model.predict_proba(X_test)
#Taking the 5 classes with highest probabilities
ids = [] #list of ids
cts = [] #list of countries
for i in range(len(id_test)):
idx = id_test[i]
ids += [idx] * 5
cts += le.inverse_transform(np.argsort(y_pred[i])[::-1])[:5].tolist()
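# Quick sanity check (my addition, not part of the original solution):
# every test id should now appear exactly five times in the output lists.
assert len(ids) == 5 * len(id_test)
assert len(cts) == len(ids)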
# We create the output directory if necessary
if not os.path.exists("output"):
os.makedirs("output")
#Generate submission
sub = pd.DataFrame(np.column_stack((ids, cts)), columns=['id', 'country'])
sub.to_csv('output/submission.csv', index=False, sep=',')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Re-Import the Check NaN Function from Part 2
Step2: 3. Loading in the Data from Part 3
Step3: 4. Getting a training dataset
Step4: 5. Training an XGBoost Model
Step5: 6. Saving the model
Step6: 7. Making the Predictions
Step7: 8. Saving Predictions
|
4,385
|
<ASSISTANT_TASK:>
Python Code:
# built-in python modules
import os
import inspect
# scientific python add-ons
import numpy as np
import pandas as pd
# plotting stuff
# first line makes the plots appear in the notebook
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib as mpl
# finally, we import the pvlib library
import pvlib
# Find the absolute file path to your pvlib installation
pvlib_abspath = os.path.dirname(os.path.abspath(inspect.getfile(pvlib)))
# absolute path to a data file
datapath = os.path.join(pvlib_abspath, 'data', '703165TY.csv')
# read tmy data with year values coerced to a single year
tmy_data, meta = pvlib.iotools.read_tmy3(datapath, coerce_year=2015)
tmy_data.index.name = 'Time'
# TMY data seems to be given as hourly data with time stamp at the end
# shift the index 30 Minutes back for calculation of sun positions
tmy_data = tmy_data.shift(freq='-30Min')['2015']
tmy_data.head()
tmy_data['GHI'].plot()
plt.ylabel('Irradiance (W/m**2)');
surface_tilt = 30
surface_azimuth = 180 # pvlib uses 0=North, 90=East, 180=South, 270=West convention
albedo = 0.2
# create pvlib Location object based on meta data
sand_point = pvlib.location.Location(meta['latitude'], meta['longitude'], tz='US/Alaska',
altitude=meta['altitude'], name=meta['Name'].replace('"',''))
print(sand_point)
solpos = pvlib.solarposition.get_solarposition(tmy_data.index, sand_point.latitude, sand_point.longitude)
solpos.plot();
# the extraradiation function returns a simple numpy array
# instead of a nice pandas series. We will change this
# in a future version
dni_extra = pvlib.irradiance.get_extra_radiation(tmy_data.index)
dni_extra = pd.Series(dni_extra, index=tmy_data.index)
dni_extra.plot()
plt.ylabel('Extra terrestrial radiation (W/m**2)');
airmass = pvlib.atmosphere.get_relative_airmass(solpos['apparent_zenith'])
airmass.plot()
plt.ylabel('Airmass');
poa_sky_diffuse = pvlib.irradiance.haydavies(surface_tilt, surface_azimuth,
tmy_data['DHI'], tmy_data['DNI'], dni_extra,
solpos['apparent_zenith'], solpos['azimuth'])
poa_sky_diffuse.plot()
plt.ylabel('Irradiance (W/m**2)');
poa_ground_diffuse = pvlib.irradiance.get_ground_diffuse(surface_tilt, tmy_data['GHI'], albedo=albedo)
poa_ground_diffuse.plot()
plt.ylabel('Irradiance (W/m**2)');
aoi = pvlib.irradiance.aoi(surface_tilt, surface_azimuth, solpos['apparent_zenith'], solpos['azimuth'])
aoi.plot()
plt.ylabel('Angle of incidence (deg)');
poa_irrad = pvlib.irradiance.poa_components(aoi, tmy_data['DNI'], poa_sky_diffuse, poa_ground_diffuse)
poa_irrad.plot()
plt.ylabel('Irradiance (W/m**2)')
plt.title('POA Irradiance');
thermal_params = pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS['sapm']['open_rack_glass_polymer']
pvtemps = pvlib.temperature.sapm_cell(poa_irrad['poa_global'], tmy_data['DryBulb'], tmy_data['Wspd'], **thermal_params)
pvtemps.plot()
plt.ylabel('Temperature (C)');
sandia_modules = pvlib.pvsystem.retrieve_sam(name='SandiaMod')
sandia_module = sandia_modules.Canadian_Solar_CS5P_220M___2009_
sandia_module
effective_irradiance = pvlib.pvsystem.sapm_effective_irradiance(poa_irrad.poa_direct, poa_irrad.poa_diffuse, airmass, aoi, sandia_module)
sapm_out = pvlib.pvsystem.sapm(effective_irradiance, pvtemps, sandia_module)
print(sapm_out.head())
sapm_out[['p_mp']].plot()
plt.ylabel('DC Power (W)');
cec_modules = pvlib.pvsystem.retrieve_sam(name='CECMod')
cec_module = cec_modules.Canadian_Solar_Inc__CS5P_220M
d = {k: cec_module[k] for k in ['a_ref', 'I_L_ref', 'I_o_ref', 'R_sh_ref', 'R_s']}
photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth = (
pvlib.pvsystem.calcparams_desoto(poa_irrad.poa_global,
pvtemps,
cec_module['alpha_sc'],
EgRef=1.121,
dEgdT=-0.0002677, **d))
single_diode_out = pvlib.pvsystem.singlediode(photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth)
single_diode_out[['p_mp']].plot()
plt.ylabel('DC Power (W)');
sapm_inverters = pvlib.pvsystem.retrieve_sam('sandiainverter')
sapm_inverter = sapm_inverters['ABB__MICRO_0_25_I_OUTD_US_208__208V_']
sapm_inverter
p_acs = pd.DataFrame()
p_acs['sapm'] = pvlib.inverter.sandia(sapm_out.v_mp, sapm_out.p_mp, sapm_inverter)
p_acs['sd'] = pvlib.inverter.sandia(single_diode_out.v_mp, single_diode_out.p_mp, sapm_inverter)
p_acs.plot()
plt.ylabel('AC Power (W)');
diff = p_acs['sapm'] - p_acs['sd']
diff.plot()
plt.ylabel('SAPM - SD Power (W)');
p_acs.loc['2015-07-05':'2015-07-06'].plot();
p_acs.describe()
p_acs.sum()
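# Rough annual energy estimate (my addition, assuming the TMY data is hourly,
# so summed instantaneous watts approximate watt-hours; /1000 gives kWh):
print(p_acs.sum() / 1000.0)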
# create data for a y=x line
p_ac_max = p_acs.max().max()
yxline = np.arange(0, p_ac_max)
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(111, aspect='equal')
sc = ax.scatter(p_acs['sd'], p_acs['sapm'], c=poa_irrad.poa_global, alpha=1)
ax.plot(yxline, yxline, 'r', linewidth=3)
ax.set_xlim(0, None)
ax.set_ylim(0, None)
ax.set_xlabel('Single Diode model')
ax.set_ylabel('Sandia model')
fig.colorbar(sc, label='POA Global (W/m**2)');
def sapm_sd_scatter(c_data, label=None, **kwargs):
    """Display a scatter plot of SAPM p_ac vs. single diode p_ac.
    You need to re-execute this cell if you re-run the p_ac calculation.

    Parameters
    ----------
    c_data : array-like
        Determines the color of each point on the scatter plot.
        Must be same length as p_acs.
    kwargs passed to ``scatter``.

    Returns
    -------
    tuple of fig, ax objects
    """
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(111, aspect='equal')
sc = ax.scatter(p_acs['sd'], p_acs['sapm'], c=c_data, alpha=1, **kwargs)
ax.plot(yxline, yxline, 'r', linewidth=3)
ax.set_xlim(0, None)
ax.set_ylim(0, None)
ax.set_xlabel('Single diode model power (W)')
ax.set_ylabel('Sandia model power (W)')
fig.colorbar(sc, label='{}'.format(label), shrink=0.75)
return fig, ax
sapm_sd_scatter(tmy_data.DryBulb, label='Temperature (deg C)');
sapm_sd_scatter(tmy_data.DNI, label='DNI (W/m**2)');
sapm_sd_scatter(tmy_data.AOD, label='AOD');
sapm_sd_scatter(tmy_data.Wspd, label='Wind speed', vmax=10);
def sapm_other_scatter(c_data, x_data, clabel=None, xlabel=None, aspect_equal=False, **kwargs):
    """Display a scatter plot of SAPM p_ac vs. something else.
    You need to re-execute this cell if you re-run the p_ac calculation.

    Parameters
    ----------
    c_data : array-like
        Determines the color of each point on the scatter plot.
        Must be same length as p_acs.
    x_data : array-like
    kwargs passed to ``scatter``.

    Returns
    -------
    tuple of fig, ax objects
    """
fig = plt.figure(figsize=(12,12))
if aspect_equal:
ax = fig.add_subplot(111, aspect='equal')
else:
ax = fig.add_subplot(111)
sc = ax.scatter(x_data, p_acs['sapm'], c=c_data, alpha=1, cmap=mpl.cm.YlGnBu_r, **kwargs)
ax.set_xlim(0, None)
ax.set_ylim(0, None)
ax.set_xlabel('{}'.format(xlabel))
ax.set_ylabel('Sandia model power (W)')
fig.colorbar(sc, label='{}'.format(clabel), shrink=0.75)
return fig, ax
sapm_other_scatter(tmy_data.DryBulb, tmy_data.GHI, clabel='Temperature (deg C)', xlabel='GHI (W/m**2)');
def pvusa(pvusa_data, a, b, c, d):
    """Calculates system power according to the PVUSA equation

        P = I * (a + b*I + c*W + d*T)

    where
    P is the output power,
    I is the plane of array irradiance,
    W is the wind speed, and
    T is the temperature

    Parameters
    ----------
    pvusa_data : pd.DataFrame
        Must contain the columns 'I', 'W', and 'T'
    a : float
        I coefficient
    b : float
        I*I coefficient
    c : float
        I*W coefficient
    d : float
        I*T coefficient

    Returns
    -------
    power : pd.Series
        Power calculated using the PVUSA model.
    """
return pvusa_data['I'] * (a + b*pvusa_data['I'] + c*pvusa_data['W'] + d*pvusa_data['T'])
from scipy import optimize
pvusa_data = pd.DataFrame()
pvusa_data['I'] = poa_irrad.poa_global
pvusa_data['W'] = tmy_data.Wspd
pvusa_data['T'] = tmy_data.DryBulb
popt, pcov = optimize.curve_fit(pvusa, pvusa_data.dropna(), p_acs.sapm.values, p0=(.0001,0.0001,.001,.001))
print('optimized coefs:\n{}'.format(popt))
print('covariances:\n{}'.format(pcov))
power_pvusa = pvusa(pvusa_data, *popt)
fig, ax = sapm_other_scatter(tmy_data.DryBulb, power_pvusa, clabel='Temperature (deg C)',
aspect_equal=True, xlabel='PVUSA (W)')
maxmax = max(ax.get_xlim()[1], ax.get_ylim()[1])
ax.set_ylim(None, maxmax)
ax.set_xlim(None, maxmax)
ax.plot(np.arange(maxmax), np.arange(maxmax), 'r');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load TMY data
Step2: The file handling above looks complicated because we're trying to account for the many different ways that people will run this notebook on their systems. You can just put a simple string path into the read_tmy3 function if you know where the file is.
Step3: This is a pandas DataFrame object. It has a lot of great properties that are beyond the scope of our tutorials.
Step4: Calculate modeling intermediates
Step5: Solar position
Step6: The funny looking jump in the azimuth is just due to the coarse time sampling in the TMY file.
Step7: Airmass
Step8: The funny appearance is due to aliasing and setting invalid numbers equal to NaN. Replot just a day or two and you'll see that the numbers are right.
Step9: POA ground diffuse
Step10: AOI
Step11: Note that AOI has values greater than 90 deg. This is ok.
Step12: Cell temperature
Step13: DC power using SAPM
Step14: Choose a particular module
Step15: Calculate the effective irradiance
Step16: Run the SAPM using the parameters we calculated above.
Step17: DC power using single diode
Step18: AC power using SAPM
Step19: Choose a particular inverter
Step20: Plot just a few days.
Step21: Some statistics on the AC power
Step23: We can change the color value c to see the sensitivity of model accuracy to measured meteorological conditions. It can be useful to define a simple plotting function for this kind of exploratory analysis.
Step25: Notice the use of the vmax keyword argument in the above example. The **kwargs pattern allows us to easily pass non-specified arguments to nested functions.
Step27: Next, we will assume that the SAPM model is representative of the real world performance so that we can use scipy's optimization routine to derive simulated PVUSA coefficients. You will need to install scipy to run these functions.
|
4,386
|
<ASSISTANT_TASK:>
Python Code:
import os, sys
from matplotlib import pyplot as plt
%matplotlib inline
import time
import numpy as np
import pickle
import pandas
import gzip
sys.path.append("..")
import cca_core
def _plot_helper(arr, xlabel, ylabel):
plt.plot(arr, lw=2.0)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.grid()
# Load up conv 2 activations from SVHN
with gzip.open("./model_activations/SVHN/model_0_lay03.p", "rb") as f:
acts1 = pickle.load(f)
with gzip.open("./model_activations/SVHN/model_1_lay03.p", "rb") as f:
acts2 = pickle.load(f)
print(acts1.shape, acts2.shape)
num_datapoints, h, w, channels = acts1.shape
f_acts1 = acts1.reshape((num_datapoints*h*w, channels))
num_datapoints, h, w, channels = acts2.shape
f_acts2 = acts2.reshape((num_datapoints*h*w, channels))
print(f_acts1.shape, f_acts2.shape)
start = time.time()
f_results = cca_core.get_cca_similarity(f_acts1.T, f_acts2.T, epsilon=1e-10, verbose=False)
print('Time: {:.2f} seconds'.format(time.time() - start))
_plot_helper(f_results["cca_coef1"], "CCA Coef idx", "CCA coef value")
start = time.time()
f_results = cca_core.get_cca_similarity(f_acts1.T[:,::2], f_acts2.T[:,::2], epsilon=1e-10, verbose=False)
print('Time: {:.2f} seconds'.format(time.time() - start))
_plot_helper(f_results["cca_coef1"], "CCA Coef idx", "CCA coef value")
with gzip.open("./model_activations/SVHN/model_1_lay04.p", "rb") as f:
pool2 = pickle.load(f)
print("shape of first conv", acts1.shape, "shape of second conv", pool2.shape)
avg_acts1 = np.mean(acts1, axis=(1,2))
avg_pool2 = np.mean(pool2, axis=(1,2))
print("shapes after average pool over spatial dimensions", avg_acts1.shape, avg_pool2.shape)
a_results = cca_core.get_cca_similarity(avg_acts1.T, avg_pool2.T, epsilon=1e-10, verbose=False)
_plot_helper(a_results["cca_coef1"], "CCA Coef idx", "CCA coef value")
from scipy import interpolate
num_d, h, w, _ = acts1.shape
num_c = pool2.shape[-1]
pool2_interp = np.zeros((num_d, h, w, num_c))
for d in range(num_d):
for c in range(num_c):
# form interpolation function
idxs1 = np.linspace(0, pool2.shape[1],
pool2.shape[1],
endpoint=False)
idxs2 = np.linspace(0, pool2.shape[2],
pool2.shape[2],
endpoint=False)
arr = pool2[d,:,:,c]
f_interp = interpolate.interp2d(idxs1, idxs2, arr)
            # create the larger array
large_idxs1 = np.linspace(0, pool2.shape[1],
acts1.shape[1],
endpoint=False)
large_idxs2 = np.linspace(0, pool2.shape[2],
acts1.shape[2],
endpoint=False)
pool2_interp[d, :, :, c] = f_interp(large_idxs1, large_idxs2)
print("new shape", pool2_interp.shape)
num_datapoints, h, w, channels = acts1.shape
f_acts1 = acts1.reshape((num_datapoints*h*w, channels))
num_datapoints, h, w, channels = pool2_interp.shape
f_pool2 = pool2_interp.reshape((num_datapoints*h*w, channels))
f_results = cca_core.get_cca_similarity(f_acts1.T[:,::5], f_pool2.T[:,::5], epsilon=1e-10, verbose=False)
_plot_helper(f_results["cca_coef1"], "CCA Coef idx", "CCA coef value")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the introduction tutorial, we looked at a simple way of thinking of neural network representations (neurons as activation vectors and layers as subspaces) and the applicability of CCA as a way to analyze these representations.
Step2: These activations have shape
Step3: The motivation for doing this is that each spatial location (i, j) in a channel sees a different image patch, and so is like an additional datapoint.
Step4: We can also do this after subsampling some of the datapoints to reduce computation overhead. The results are very stable under this subsampling, as the number of channels we have (64) is significantly smaller than the number of datapoints
Step5: Conv Layers with Different Spatial Dimensions
Step6: Here we want to compare conv2 in model 0 with the maxpool activations in model 1. We can't just turn the spatial dimensions into datapoints like before, because then the number of datapoints will be different for each layer. Remember, CCA can make comparisons across different numbers of neurons (in our case different numbers of channels) but the data dimension has to be the same.
Step7: Interpolate
Step8: For this example, we just used the default interpolation (linear) in the interp2d function. There are many different interpolations possible that would be interesting to explore.
|
4,387
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import os.path as op
import numpy as np
from scipy import stats as stats
import mne
from mne import spatial_src_connectivity
from mne.stats import spatio_temporal_cluster_test, summarize_clusters_stc
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
stc_fname = data_path + '/MEG/sample/sample_audvis-meg-lh.stc'
subjects_dir = data_path + '/subjects'
src_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'
# Load stc to in common cortical space (fsaverage)
stc = mne.read_source_estimate(stc_fname)
stc.resample(50, npad='auto')
# Read the source space we are morphing to
src = mne.read_source_spaces(src_fname)
fsave_vertices = [s['vertno'] for s in src]
morph = mne.compute_source_morph(stc, 'sample', 'fsaverage',
spacing=fsave_vertices, smooth=20,
subjects_dir=subjects_dir)
stc = morph.apply(stc)
n_vertices_fsave, n_times = stc.data.shape
tstep = stc.tstep
n_subjects1, n_subjects2 = 7, 9
print('Simulating data for %d and %d subjects.' % (n_subjects1, n_subjects2))
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X1 = np.random.randn(n_vertices_fsave, n_times, n_subjects1) * 10
X2 = np.random.randn(n_vertices_fsave, n_times, n_subjects2) * 10
X1[:, :, :] += stc.data[:, :, np.newaxis]
# make the activity bigger for the second set of subjects
X2[:, :, :] += 3 * stc.data[:, :, np.newaxis]
# We want to compare the overall activity levels for each subject
X1 = np.abs(X1) # only magnitude
X2 = np.abs(X2) # only magnitude
print('Computing connectivity.')
connectivity = spatial_src_connectivity(src)
# Note that X needs to be a list of multi-dimensional array of shape
# samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]
# Now let's actually do the clustering. This can take a long time...
# Here we set the threshold quite high to reduce computation.
p_threshold = 0.0001
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
n_subjects1 - 1, n_subjects2 - 1)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu =\
spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=1,
threshold=f_threshold, buffer_size=None)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
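# Report how many clusters survive the 0.05 threshold (an added check,
# not part of the original example).
print('Found %d significant cluster(s)' % len(good_cluster_inds))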
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A != condition B
brain = stc_all_cluster_vis.plot('fsaverage', hemi='both',
views='lateral', subjects_dir=subjects_dir,
time_label='Duration significant (ms)',
clim=dict(kind='value', lims=[0, 1, 40]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Compute statistic
Step3: Visualize the clusters
|
4,388
|
<ASSISTANT_TASK:>
Python Code:
import statsmodels.formula.api as smf
import pandas as pd
import numpy as np
import scipy as sp
# Create some example data
cs= [12, 14, 16, 18] # classes of carbons
ds = [0, 1, 2, 3] # classes of double bonds
df = pd.DataFrame({'RT': np.random.uniform(low=0.1, high=15, size=1000), 'Carbon': np.random.choice(cs, size=1000), 'DB': np.random.choice(ds, size=1000)})
df.head()
# Write out my R-style formula
formula = 'RT ~ C(Carbon) + C(DB)'
# Generate a model using the formula and dataframe. This steps builds all of the matrices needed for OLS.
model = smf.ols(formula, df)
# Fit the model and get the results output
results = model.fit()
# Print an overall summary of the model
results.summary()
# Only showing the first 5 to keep things short
dir(results)[:5]
rSquared = results.rsquared
print(rSquared)
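# For comparison (my addition): the adjusted R-squared penalizes the fit
# for the number of model terms.
print(results.rsquared_adj)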
from statsmodels.stats.outliers_influence import summary_table
# Use summary table to pull of the stats that we need
st, data, header = summary_table(results, alpha=0.05)
header
head2index = {
'obs': 0,
'dep': 1,
'pred': 2,
'SEpred': 3,
'CIlow': 4,
'CIhi': 5,
'PIlow': 6,
'PIhi': 7,
'residual': 8,
'SEres': 9,
'student': 10,
'cooksd': 11
}
# Get fitted values
fittedvalues = np.sort(data[:, head2index['pred']])
# Get upper and lower prediction intervals
predHi = np.sort(data[:, head2index['PIhi']])
predLow = np.sort(data[:, head2index['PIlow']])
# I think these are the same as those output by
# from statsmodels.sandbox.regression.predstd import wls_prediction_std
# wls_prediction_std(results)
# but need to double check
# Get upper and lower Confidence Intervals
ciHi = np.sort(data[:, head2index['CIhi']])
ciLow = np.sort(data[:, head2index['CIlow']])
# Get Cook's D
cooks = data[:, head2index['cooksd']]
# Import plotting library
import matplotlib.pyplot as plt
# Ignore this command, it allows inline plotting in ipython
%matplotlib inline
# Get original data values
x = df.index.values
y = df['RT'].values
# Make Figure with only 1 plot
fig, ax = plt.subplots(1, 1, figsize=(20,20))
# Plot original data
ax.scatter(x, y)
# Plot fitted data and confidence regions
ax.plot(x, fittedvalues, 'g-', lw=2)
ax.plot(x, predHi, 'r--', lw=2)
ax.plot(x, predLow, 'r--', lw=2)
ax.plot(x, ciHi, 'y--', lw=2)
ax.plot(x, ciLow, 'y--', lw=2)
# Adjust plot to make it look a little better
ax.set_xlabel('Row', fontsize=18)
ax.set_ylabel('RT', fontsize=18)
ax.set_title('Regression of Carbon and Bond number on RT', fontsize=24)
ax.set_xlim(0, 1000)
infl = results.get_influence()
dir(infl)
# Cook's D stat (same as above method)
cooksD = infl.cooks_distance
# PRESS residuals
press = infl.resid_press
# DFFITS provides the value and the cutoff threshold
(dffits, dffitsThres) = infl.dffits
# Flag DFFITS if greater than threshold
flagDffits = dffits > dffitsThres
# Make pretty table for everything
pretty = pd.DataFrame({
'cooksD': cooksD[0],
'dffits': dffits,
'dffits thresh': [dffitsThres]*1000,
'flag_dffits': flagDffits.astype(int),
'press resid': press
})
pretty.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Example Datasets
Step2: Create OLS output
Step3: Access results attributes
Step4: There are two $R^2$ values that you can get from these results
Step5: Get influence measures
Step6: Summary table outputs 3 different datasets
Step7: For our sanity it will help to map a short header name with the column number. I just print out the header and make a dictionary relating a short version of the header with the column number. You don't really need to do this, but it makes things a little cleaner.
Step8: Notice there is no DFFITS; I use a different method below to handle this. Warning
Step9: Make a plot with prediction intervals
Step10: The plot above shows the following. Remember this is simulated data, so it looks a little strange.
|
4,389
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import math
import rasterio
# This notebook explores a single 4 band (blue, green, red, NIR) PlanetScope scene in a UTM projection.
image_file = "example.tif"
satdat = rasterio.open(image_file)
# Minimum bounding box in projected units
print(satdat.bounds)
# Get dimensions, in map units (using the example GeoTIFF, that's meters)
width_in_projected_units = satdat.bounds.right - satdat.bounds.left
height_in_projected_units = satdat.bounds.top - satdat.bounds.bottom
print("Width: {}, Height: {}".format(width_in_projected_units, height_in_projected_units))
# Number of rows and columns.
print("Rows: {}, Columns: {}".format(satdat.height, satdat.width))
# This dataset's projection uses meters as distance units. What are the dimensions of a single pixel in meters?
xres = (satdat.bounds.right - satdat.bounds.left) / satdat.width
yres = (satdat.bounds.top - satdat.bounds.bottom) / satdat.height
print(xres, yres)
print("Are the pixels square: {}".format(xres == yres))
# Get coordinate reference system
satdat.crs
# Convert pixel coordinates to world coordinates.
# Upper left pixel
row_min = 0
col_min = 0
# Lower right pixel. Rows and columns are zero-indexed.
row_max = satdat.height - 1
col_max = satdat.width - 1
# Transform pixel coordinates to world coordinates with the dataset's affine
# transformation. Note that the transform expects (column, row) order.
topleft = satdat.transform * (col_min, row_min)
botright = satdat.transform * (col_max, row_max)
print("Top left corner coordinates: {}".format(topleft))
print("Bottom right corner coordinates: {}".format(botright))
# All of the metadata required to create an image of the same dimensions, datatype, format, etc. is stored in
# one location.
print(satdat.meta)
# The dataset reports a band count.
print(satdat.count)
# And provides a sequence of band indexes. These are one indexing, not zero indexing like Numpy arrays.
print(satdat.indexes)
# PlanetScope 4-band band order: BGRN
blue, green, red, nir = satdat.read()
# Or the slightly less efficient:
# blue = satdat.read(1)
# green = satdat.read(2)
# red = satdat.read(3)
# nir = satdat.read(4)
# Or read the entire dataset into a single 3D array:
# data = satdat.read()
# Bands are stored as Numpy arrays.
print(type(blue))
# How many dimensions would a single raster band have? Two dimensions: rows and columns.
print(blue.ndim)
# Glimpse at the band's values and datatype.
print(blue)
print(blue.dtype)
# Output a min & max pixel value in each band.
for bidx in satdat.indexes:
data = satdat.read(bidx)
print("Band {bidx} min {min} max {max}".format(bidx=bidx, min=data.min(), max=data.max()))
# And an overall min/max for the entire dataset.
data = satdat.read()
print("Overall min/max: {} {}".format(data.min(), data.max()))
# Let's grab the pixel 2km east and 2km south of the upper left corner
# World coordinates for the desired pixel.
x_coord = satdat.bounds.left + 2000
y_coord = satdat.bounds.top - 2000
# Convert world coordinates to pixel. World coordinates may not transform precisely to row and column indexes,
# but a Numpy array can only be indexed by integer values. The 'op' parameter for 'satdat.index()' determines
# how the transformed values are rounded. In some cases any point falling within a pixel should be considered
# contained, and in other cases only points falling within one portion of the pixels hould be considered contained.
# The 'op' parameter lets users make this decision on their own. The values must still be cast to integers.
row, col = satdat.index(x_coord, y_coord, op=math.floor)
col = int(col)
row = int(row)
# Now let's look at the value of each band at this pixel
print("Red: {}".format(red[row, col]))
print("Green: {}".format(green[row, col]))
print("Blue: {}".format(blue[row, col]))
print("NIR: {}".format(nir[row, col]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic details
Step2: Bands
Step3: Because we know we're look at a PlanetScope 4-band analytic satellite image, we can define the bands by their order
Step4: Pixels
|
4,390
|
<ASSISTANT_TASK:>
Python Code:
def sort(L):
n = len(L)
if n < 2:
return L
L1, L2 = L[:n//2], L[n//2:]
return merge(sort(L1), sort(L2))
def merge(L1, L2):
if L1 == []:
return L2
if L2 == []:
return L1
x1, *R1 = L1
x2, *R2 = L2
if x1 <= x2:
return [x1] + merge(R1, L2)
else:
return [x2] + merge(L1, R2)
sort([7, 8, 11, 12, 2, 5, 3, 7, 9, 3, 2])
import random as rnd
import sys
# The recursive merge descends one level per list element, so raise the
# default recursion limit before sorting lists of length 2000 (see the
# test below).
sys.setrecursionlimit(10000)
from collections import Counter
Counter(['a', 'b', 'a', 'b', 'c', 'a'])
def demo():
L = [ rnd.randrange(1, 99+1) for n in range(1, 19+1) ]
print("L = ", L)
S = L[:]
S = sort(S)
print("S = ", S)
print(Counter(L))
print(Counter(S))
print(Counter(L) == Counter(S))
demo()
def isOrdered(L):
for i in range(len(L) - 1):
assert L[i] <= L[i+1], f'{L} not ordered at {i}'
def sameElements(L, S):
assert Counter(L) == Counter(S)
def testSort(n, k):
for i in range(n):
L = [ rnd.randrange(2*k) for x in range(k) ]
S = sort(L)
isOrdered(S)
sameElements(L, S)
print('.', end='')
print()
print("All tests successful!")
%%time
testSort(100, 2000)
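# For reference (my addition, not part of the original notebook): an
# iterative merge avoids the recursion-depth limit discussed above.
def merge_iterative(L1, L2):
    result = []
    i = j = 0
    while i < len(L1) and j < len(L2):
        if L1[i] <= L2[j]:
            result.append(L1[i])
            i += 1
        else:
            result.append(L2[j])
            j += 1
    return result + L1[i:] + L2[j:]
print(merge_iterative([1, 3, 5], [2, 4, 6]))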
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We still need to specify how two sorted lists $L_1$ and $L_2$ are merged in a way that the resulting
Step2: Testing
Step3: We import the class Counter from the module collections. This module provides us with a dictionary that keeps count of how many times each element occurs.
Step4: The function isOrdered(L) checks that the list L is sorted ascendingly.
Step5: The function sameElements(L, S) checks that the lists L and S contain the same elements and, furthermore, that each element occurs the same number of times in both lists.
Step6: The function $\texttt{testSort}(n, k)$ generates $n$ random lists of length $k$, sorts them, and checks whether the output is sorted and contains the same elements as the input.
Step7: Due to the recursion limit in Python we can only sort lists of length 2000.
|
4,391
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Use the latest version of pip.
!pip install --upgrade pip
!pip install --upgrade "tfx[kfp]<2"
# docs_infra: no_execute
import sys
if not 'google.colab' in sys.modules:
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import tensorflow as tf
print('TensorFlow version: {}'.format(tf.__version__))
from tfx import v1 as tfx
print('TFX version: {}'.format(tfx.__version__))
import kfp
print('KFP version: {}'.format(kfp.__version__))
GOOGLE_CLOUD_PROJECT = '' # <--- ENTER THIS
GOOGLE_CLOUD_REGION = 'us-central1'
GCS_BUCKET_NAME = GOOGLE_CLOUD_PROJECT + '-gcs'
if not (GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_REGION and GCS_BUCKET_NAME):
from absl import logging
logging.error('Please set all required parameters.')
!gcloud config set project {GOOGLE_CLOUD_PROJECT}
PIPELINE_NAME = 'penguin-vertex-pipelines'
# Path to various pipeline artifact.
PIPELINE_ROOT = 'gs://{}/pipeline_root/{}'.format(
GCS_BUCKET_NAME, PIPELINE_NAME)
# Paths for users' Python module.
MODULE_ROOT = 'gs://{}/pipeline_module/{}'.format(
GCS_BUCKET_NAME, PIPELINE_NAME)
# Paths for input data.
DATA_ROOT = 'gs://{}/data/{}'.format(GCS_BUCKET_NAME, PIPELINE_NAME)
# This is the path where your model will be pushed for serving.
SERVING_MODEL_DIR = 'gs://{}/serving_model/{}'.format(
GCS_BUCKET_NAME, PIPELINE_NAME)
print('PIPELINE_ROOT: {}'.format(PIPELINE_ROOT))
!gsutil cp gs://download.tensorflow.org/data/palmer_penguins/penguins_processed.csv {DATA_ROOT}/
!gsutil cat {DATA_ROOT}/penguins_processed.csv | head
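# Optional sanity check (my addition): load the CSV through tf.io.gfile,
# which understands gs:// paths, and confirm the value ranges noted below.
import pandas as pd
with tf.io.gfile.GFile(DATA_ROOT + '/penguins_processed.csv') as f:
    penguins = pd.read_csv(f)
print(penguins.describe())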
_trainer_module_file = 'penguin_trainer.py'
%%writefile {_trainer_module_file}
# Copied from https://www.tensorflow.org/tfx/tutorials/tfx/penguin_simple
from typing import List
from absl import logging
import tensorflow as tf
from tensorflow import keras
from tensorflow_transform.tf_metadata import schema_utils
from tfx import v1 as tfx
from tfx_bsl.public import tfxio
from tensorflow_metadata.proto.v0 import schema_pb2
_FEATURE_KEYS = [
'culmen_length_mm', 'culmen_depth_mm', 'flipper_length_mm', 'body_mass_g'
]
_LABEL_KEY = 'species'
_TRAIN_BATCH_SIZE = 20
_EVAL_BATCH_SIZE = 10
# Since we're not generating or creating a schema, we will instead create
# a feature spec. Since there are a fairly small number of features this is
# manageable for this dataset.
_FEATURE_SPEC = {
**{
feature: tf.io.FixedLenFeature(shape=[1], dtype=tf.float32)
for feature in _FEATURE_KEYS
},
_LABEL_KEY: tf.io.FixedLenFeature(shape=[1], dtype=tf.int64)
}
def _input_fn(file_pattern: List[str],
data_accessor: tfx.components.DataAccessor,
schema: schema_pb2.Schema,
batch_size: int) -> tf.data.Dataset:
  """Generates features and label for training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    schema: schema of the input data.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch

  Returns:
    A dataset that contains (features, indices) tuple where features is a
      dictionary of Tensors, and indices is a single Tensor of label indices.
  """
return data_accessor.tf_dataset_factory(
file_pattern,
tfxio.TensorFlowDatasetOptions(
batch_size=batch_size, label_key=_LABEL_KEY),
schema=schema).repeat()
def _make_keras_model() -> tf.keras.Model:
  """Creates a DNN Keras model for classifying penguin data.

  Returns:
    A Keras Model.
  """
# The model below is built with Functional API, please refer to
# https://www.tensorflow.org/guide/keras/overview for all API options.
inputs = [keras.layers.Input(shape=(1,), name=f) for f in _FEATURE_KEYS]
d = keras.layers.concatenate(inputs)
for _ in range(2):
d = keras.layers.Dense(8, activation='relu')(d)
outputs = keras.layers.Dense(3)(d)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.Adam(1e-2),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy()])
model.summary(print_fn=logging.info)
return model
# TFX Trainer will call this function.
def run_fn(fn_args: tfx.components.FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
# This schema is usually either an output of SchemaGen or a manually-curated
# version provided by pipeline author. A schema can also derived from TFT
# graph if a Transform component is used. In the case when either is missing,
# `schema_from_feature_spec` could be used to generate schema from very simple
# feature_spec, but the schema returned would be very primitive.
schema = schema_utils.schema_from_feature_spec(_FEATURE_SPEC)
train_dataset = _input_fn(
fn_args.train_files,
fn_args.data_accessor,
schema,
batch_size=_TRAIN_BATCH_SIZE)
eval_dataset = _input_fn(
fn_args.eval_files,
fn_args.data_accessor,
schema,
batch_size=_EVAL_BATCH_SIZE)
model = _make_keras_model()
model.fit(
train_dataset,
steps_per_epoch=fn_args.train_steps,
validation_data=eval_dataset,
validation_steps=fn_args.eval_steps)
# The result of the training should be saved in `fn_args.serving_model_dir`
# directory.
model.save(fn_args.serving_model_dir, save_format='tf')
!gsutil cp {_trainer_module_file} {MODULE_ROOT}/
# Copied from https://www.tensorflow.org/tfx/tutorials/tfx/penguin_simple and
# slightly modified because we don't need `metadata_path` argument.
def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str,
module_file: str, serving_model_dir: str,
) -> tfx.dsl.Pipeline:
  """Creates a three component penguin pipeline with TFX."""
# Brings data into the pipeline.
example_gen = tfx.components.CsvExampleGen(input_base=data_root)
# Uses user-provided Python function that trains a model.
trainer = tfx.components.Trainer(
module_file=module_file,
examples=example_gen.outputs['examples'],
train_args=tfx.proto.TrainArgs(num_steps=100),
eval_args=tfx.proto.EvalArgs(num_steps=5))
# Pushes the model to a filesystem destination.
pusher = tfx.components.Pusher(
model=trainer.outputs['model'],
push_destination=tfx.proto.PushDestination(
filesystem=tfx.proto.PushDestination.Filesystem(
base_directory=serving_model_dir)))
# Following three components will be included in the pipeline.
components = [
example_gen,
trainer,
pusher,
]
return tfx.dsl.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=components)
import os
PIPELINE_DEFINITION_FILE = PIPELINE_NAME + '_pipeline.json'
runner = tfx.orchestration.experimental.KubeflowV2DagRunner(
config=tfx.orchestration.experimental.KubeflowV2DagRunnerConfig(),
output_filename=PIPELINE_DEFINITION_FILE)
# Following function will write the pipeline definition to PIPELINE_DEFINITION_FILE.
_ = runner.run(
_create_pipeline(
pipeline_name=PIPELINE_NAME,
pipeline_root=PIPELINE_ROOT,
data_root=DATA_ROOT,
module_file=os.path.join(MODULE_ROOT, _trainer_module_file),
serving_model_dir=SERVING_MODEL_DIR))
# docs_infra: no_execute
from google.cloud import aiplatform
from google.cloud.aiplatform import pipeline_jobs
aiplatform.init(project=GOOGLE_CLOUD_PROJECT, location=GOOGLE_CLOUD_REGION)
job = pipeline_jobs.PipelineJob(template_path=PIPELINE_DEFINITION_FILE,
display_name=PIPELINE_NAME)
job.run(sync=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple TFX Pipeline for Vertex Pipelines
Step2: Restart the runtime
Step3: Check the package versions.
Step4: Set up variables
Step5: Set gcloud to use your project.
Step6: Prepare example data
Step7: Take a quick look at the CSV file.
Step11: You should be able to see five values. species is one of 0, 1 or 2, and all other features should have values between 0 and 1.
Step12: Copy the module file to GCS which can be accessed from the pipeline components.
Step14: Write a pipeline definition
Step15: Run the pipeline on Vertex Pipelines.
Step16: The generated definition file can be submitted using kfp client.
|
4,392
|
<ASSISTANT_TASK:>
Python Code:
using JuMP
m = Model()
# Defining variables
Classes = ["A", "B", "C"]
Shift = ["R", "O"]
@variable(m, x[Classes, Shift] >= 0)
# Define Constraints
@constraints m begin
2x["B","R"] + 3x["C","R"] <= 40
2x["B","O"] + 3x["C","O"] <= 35
3x["A","R"] + x["B","R"] + 3x["C","R"] + x["B","O"] + 3x["C","O"] <= 60
3x["A","R"] + 6x["B","R"] + 2x["C","R"] + 6x["B","O"] + 2x["C","O"] <= 200
5x["A","R"] + 4x["B","R"] + 4x["B","O"] <= 160
x["A","R"] >= 10
x["B","R"] + x["B","O"] >= 10
x["C","R"] + x["C","O"] >= 5
end
@objective(m, Max, 200x["A","R"] + 300x["B","R"] + 100x["C","R"] + 292x["B","O"] + 88x["C","O"])
print(m)
solve(m)
println("Optimal Profit: ", getobjectivevalue(m))
println(getvalue(x))
writeLP(m, "printing.lp")
!less printing.lp
!glpsol --cpxlp printing.lp --ranges printing.sen
!less printing.sen
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: PART A
Step2: Change Kernel to Python
|
4,393
|
<ASSISTANT_TASK:>
Python Code:
#
# When the cursor is in this cell hit "shift enter" to execute the python code here
#
x=3 # x is assigned an integer value of 3
y=2.4 # y is assigned a floating point value of 2.4
print("x and y are:", x, 'and',y)
z=x+y
print("The value of z is:", z)
aList = [1,2,9,4,7]
print("We have a list:", aList, "with a length of:", len(aList))
aTuple = (1,2,9,4,7)
print("We have a tuple:", aTuple, "with a length of:", len(aTuple))
aList.append(17)
print("We now have a modified list:", aList, "with a length of ", len(aList))
aTuple.append(12) # this will fail! You can't append to a tuple.
print("element 3 of the list is", aList[3])
print("element 27 of the tuple is", aTuple[27]) # this will fail! There is no such element.
print(aList*3)
print(aTuple*4)
aString = "hello there world!"
print("We have a string:", aString, "whose length is:", len(aString))
print("the 9th element of aString is", aString[9])
print(aString.split()) # split a string on spaces, convert to a list
bString="""1,2,3,4
5,6,7,8
9,10,11,12"""
print("Here's what bString looks like:", repr(bString))
print("Let's split the lines:", bString.splitlines())
print(aString.upper())
aDict = {'a':1, 'b':"sam", 'joe':3.1415927}
print("We have a dictionary with keys:", aDict.keys())
print("It as values:", aDict.values())
print("And items:", aDict.items())
print("You can index it like an array:", aDict['joe'])
# tell the plotting system that we want plots inside the notebook
%matplotlib inline
import pandas as pd # we're renaming pandas as 'pd' here to save typing
myDF = pd.DataFrame({'x':[1,2,3,4,5], 'y':[9,8,7,6,5]})
print("Here is x:", myDF.x.values)
print("Here is y:", myDF.y.values)
#
# It is possible to plot directly from the DataFrame
#
myDF.plot('x','y')
print("Starting with:", repr(bString)) # show the raw string
for s in bString.splitlines():
print("Breaking down the string:", s)
for n in s.split(','):
print("Found the item:", n)
#
# another way with the while loop
#
cList=bString.splitlines()
while len(cList)>0:
s=cList.pop(0) # pop off the zeroth element of the list
for n in s.split(','):
print("Found item:", n)
def myFunction(x): # function to compute 1.0/(x**2 + 1.0)
aLocalVar = x*x+1.0 # this is x**2 + 1.0
return 1.0/aLocalVar # finally 1.0/(x**2 + 1.0)
for i in range(10):
print("i=",i,"myFunction(i):", myFunction(i))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Like most programming languages you can operate on these values using unary and binary operators, like so
Step2: Tuples and Lists
Step3: The difference between these guys is that a list is 'mutable' and a tuple is not. In other words you can change a list by inserting, appending and deleting elements of the list, but a tuple, once born, cannot be modified.
Step4: Items within Tuples and lists can be accessed using their "index". Note that index values start at "0"
Step5: Note
Step6: Strings
Step8: String methods
Step9: Dictionaries
Step10: import this
Step11: The most important object defined in pandas is the DataFrame. Here's one way to create a DataFrame with pandas using a dictionary
Step12: This creates a DataFrame with two "Columns" x and y. You can fetch the columns individually
Step13: Logic and Loops
Step14: Functions
|
4,394
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
%matplotlib inline
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
import shogun as sg
import numpy as np
# use scipy for generating samples
from scipy.stats import laplace, norm
def sample_gaussian_vs_laplace(n=220, mu=0.0, sigma2=1, b=np.sqrt(0.5)):
# sample from both distributions
X=norm.rvs(size=n)*np.sqrt(sigma2)+mu
Y=laplace.rvs(size=n, loc=mu, scale=b)
return X,Y
mu=0.0
sigma2=1
b=np.sqrt(0.5)
n=220
X,Y=sample_gaussian_vs_laplace(n, mu, sigma2, b)
# plot both densities and histograms
plt.figure(figsize=(18,5))
plt.suptitle("Gaussian vs. Laplace")
plt.subplot(121)
Xs=np.linspace(-2, 2, 500)
plt.plot(Xs, norm.pdf(Xs, loc=mu, scale=sigma2))
plt.plot(Xs, laplace.pdf(Xs, loc=mu, scale=b))
plt.title("Densities")
plt.xlabel("$x$")
plt.ylabel("$p(x)$")
plt.subplot(122)
plt.hist(X, alpha=0.5)
plt.xlim([-5,5])
plt.ylim([0,100])
plt.hist(Y,alpha=0.5)
plt.xlim([-5,5])
plt.ylim([0,100])
plt.legend(["Gaussian", "Laplace"])
plt.title('Samples');
print "Gaussian vs. Laplace"
print "Sample means: %.2f vs %.2f" % (np.mean(X), np.mean(Y))
print "Samples variances: %.2f vs %.2f" % (np.var(X), np.var(Y))
# turn data into Shogun representation (columns vectors)
feat_p=sg.RealFeatures(X.reshape(1,len(X)))
feat_q=sg.RealFeatures(Y.reshape(1,len(Y)))
# choose kernel for testing. Here: Gaussian
kernel_width=1
kernel=sg.GaussianKernel(10, kernel_width)
# create mmd instance of test-statistic
mmd=sg.QuadraticTimeMMD()
mmd.set_kernel(kernel)
mmd.set_p(feat_p)
mmd.set_q(feat_q)
# compute biased and unbiased test statistic (default is unbiased)
mmd.set_statistic_type(sg.ST_BIASED_FULL)
biased_statistic=mmd.compute_statistic()
mmd.set_statistic_type(sg.ST_UNBIASED_FULL)
statistic=unbiased_statistic=mmd.compute_statistic()
print "%d x MMD_b[X,Y]^2=%.2f" % (len(X), biased_statistic)
print "%d x MMD_u[X,Y]^2=%.2f" % (len(X), unbiased_statistic)
mmd.set_null_approximation_method(sg.NAM_PERMUTATION)
mmd.set_num_null_samples(200)
# now show a couple of ways to compute the test
# compute p-value for computed test statistic
p_value=mmd.compute_p_value(statistic)
print "P-value of MMD value %.2f is %.2f" % (statistic, p_value)
# compute threshold for rejecting H_0 for a given test power
alpha=0.05
threshold=mmd.compute_threshold(alpha)
print "Threshold for rejecting H0 with a test power of %.2f is %.2f" % (alpha, threshold)
# performing the test by hand given the above results, note that those two are equivalent
if statistic>threshold:
print "H0 is rejected with confidence %.2f" % alpha
if p_value<alpha:
print "H0 is rejected with confidence %.2f" % alpha
# or, compute the full two-sample test directly
# fixed test power, binary decision
binary_test_result=mmd.perform_test(alpha)
if binary_test_result:
print "H0 is rejected with confidence %.2f" % alpha
num_samples=500
# sample null distribution
null_samples=mmd.sample_null()
# sample alternative distribution, generate new data for that
alt_samples=np.zeros(num_samples)
for i in range(num_samples):
X=norm.rvs(size=n, loc=mu, scale=sigma2)
Y=laplace.rvs(size=n, loc=mu, scale=b)
feat_p=sg.RealFeatures(np.reshape(X, (1,len(X))))
feat_q=sg.RealFeatures(np.reshape(Y, (1,len(Y))))
# TODO: reset pre-computed kernel here
mmd.set_p(feat_p)
mmd.set_q(feat_q)
alt_samples[i]=mmd.compute_statistic()
np.std(alt_samples)
def plot_alt_vs_null(alt_samples, null_samples, alpha):
plt.figure(figsize=(18,5))
plt.subplot(131)
plt.hist(null_samples, 50, color='blue')
plt.title('Null distribution')
plt.subplot(132)
plt.title('Alternative distribution')
plt.hist(alt_samples, 50, color='green')
plt.subplot(133)
plt.hist(null_samples, 50, color='blue')
plt.hist(alt_samples, 50, color='green', alpha=0.5)
plt.title('Null and alternative distriution')
# find (1-alpha) element of null distribution
null_samples_sorted=np.sort(null_samples)
quantile_idx=int(len(null_samples)*(1-alpha))
quantile=null_samples_sorted[quantile_idx]
plt.axvline(x=quantile, ymin=0, ymax=100, color='red', label=str(int(round((1-alpha)*100))) + '% quantile of null')
legend();
plot_alt_vs_null(alt_samples, null_samples, alpha)
# optional: plot spectrum of joint kernel matrix
# TODO: it would be good if there was a way to extract the joint kernel matrix for all kernel tests
# get joint feature object and compute kernel matrix and its spectrum
feats_p_q=mmd.get_p_and_q()
mmd.get_kernel().init(feats_p_q, feats_p_q)
K=mmd.get_kernel().get_kernel_matrix()
w,_=np.linalg.eig(K)
# visualise K and its spectrum (only up to threshold)
plt.figure(figsize=(18,5))
plt.subplot(121)
plt.imshow(K, interpolation="nearest")
plt.title("Kernel matrix K of joint data $X$ and $Y$")
plt.subplot(122)
thresh=0.1
plt.plot(w[:len(w[w>thresh])])
title("Eigenspectrum of K until component %d" % len(w[w>thresh]));
# threshold for eigenspectrum
thresh=0.1
# compute number of eigenvalues to use
num_eigen=len(w[w>thresh])
# finally, do the test, use biased statistic
mmd.set_statistic_type(sg.ST_BIASED_FULL)
#tell Shogun to use spectrum approximation
mmd.set_null_approximation_method(sg.NAM_MMD2_SPECTRUM)
mmd.spectrum_set_num_eigenvalues(num_eigen)
mmd.set_num_null_samples(num_samples)
# the usual test interface
statistic=mmd.compute_statistic()
p_value_spectrum=mmd.compute_p_value(statistic)
print "Spectrum: P-value of MMD test is %.2f" % p_value_spectrum
# compare with ground truth from permutation test
mmd.set_null_approximation_method(sg.NAM_PERMUTATION)
mmd.set_num_null_samples(num_samples)
p_value_permutation=mmd.compute_p_value(statistic)
print "Bootstrapping: P-value of MMD test is %.2f" % p_value_permutation
# tell Shogun to use gamma approximation
mmd.set_null_approximation_method(sg.NAM_MMD2_GAMMA)
# the usual test interface
statistic=mmd.compute_statistic()
p_value_gamma=mmd.compute_p_value(statistic)
print "Gamma: P-value of MMD test is %.2f" % p_value_gamma
# compare with ground truth bootstrapping
mmd.set_null_approximation_method(sg.NAM_PERMUTATION)
p_value_spectrum=mmd.compute_p_value(statistic)
print "Bootstrapping: P-value of MMD test is %.2f" % p_value_spectrum
# type I error is false alarm, therefore sample data under H0
num_trials=50
rejections_gamma=zeros(num_trials)
rejections_spectrum=zeros(num_trials)
rejections_bootstrap=zeros(num_trials)
num_samples=50
alpha=0.05
for i in range(num_trials):
X=norm.rvs(size=n, loc=mu, scale=sigma2)
Y=laplace.rvs(size=n, loc=mu, scale=b)
# simulate H0 via merging samples before computing the
Z=hstack((X,Y))
X=Z[:len(X)]
Y=Z[len(X):]
feat_p=sg.RealFeatures(reshape(X, (1,len(X))))
feat_q=sg.RealFeatures(reshape(Y, (1,len(Y))))
# gamma
mmd=sg.QuadraticTimeMMD(feat_p, feat_q)
mmd.set_kernel(kernel)
mmd.set_null_approximation_method(sg.NAM_MMD2_GAMMA)
mmd.set_statistic_type(sg.ST_BIASED_FULL)
rejections_gamma[i]=mmd.perform_test(alpha)
# spectrum
mmd=sg.QuadraticTimeMMD(feat_p, feat_q)
mmd.set_kernel(kernel)
mmd.set_null_approximation_method(sg.NAM_MMD2_SPECTRUM)
mmd.spectrum_set_num_eigenvalues(num_eigen)
mmd.set_num_null_samples(num_samples)
mmd.set_statistic_type(sg.ST_BIASED_FULL)
rejections_spectrum[i]=mmd.perform_test(alpha)
# bootstrap (precompute kernel)
mmd=sg.QuadraticTimeMMD(feat_p, feat_q)
p_and_q=mmd.get_p_and_q()
kernel.init(p_and_q, p_and_q)
precomputed_kernel=sg.CustomKernel(kernel)
mmd.set_kernel(precomputed_kernel)
mmd.set_null_approximation_method(sg.NAM_PERMUTATION)
mmd.set_num_null_samples(num_samples)
mmd.set_statistic_type(sg.ST_BIASED_FULL)
rejections_bootstrap[i]=mmd.perform_test(alpha)
convergence_gamma=cumsum(rejections_gamma)/(arange(num_trials)+1)
convergence_spectrum=cumsum(rejections_spectrum)/(arange(num_trials)+1)
convergence_bootstrap=cumsum(rejections_bootstrap)/(arange(num_trials)+1)
print "Average rejection rate of H0 for Gamma is %.2f" % mean(convergence_gamma)
print "Average rejection rate of H0 for Spectrum is %.2f" % mean(convergence_spectrum)
print "Average rejection rate of H0 for Bootstrapping is %.2f" % mean(rejections_bootstrap)
# paramters of dataset
m=20000
distance=10
stretch=5
num_blobs=3
angle=pi/4
# these are streaming features
gen_p=sg.GaussianBlobsDataGenerator(num_blobs, distance, 1, 0)
gen_q=sg.GaussianBlobsDataGenerator(num_blobs, distance, stretch, angle)
# stream some data and plot
num_plot=1000
features=gen_p.get_streamed_features(num_plot)
features=features.create_merged_copy(gen_q.get_streamed_features(num_plot))
data=features.get_feature_matrix()
figure(figsize=(18,5))
subplot(121)
grid(True)
plot(data[0][0:num_plot], data[1][0:num_plot], 'r.', label='$x$')
title('$X\sim p$')
subplot(122)
grid(True)
plot(data[0][num_plot+1:2*num_plot], data[1][num_plot+1:2*num_plot], 'b.', label='$x$', alpha=0.5)
_=title('$Y\sim q$')
block_size=100
# if features are already under the streaming interface, just pass them
mmd=sg.LinearTimeMMD(gen_p, gen_q)
mmd.set_kernel(kernel)
mmd.set_num_samples_p(m)
mmd.set_num_samples_q(m)
mmd.set_num_blocks_per_burst(block_size)
# compute an unbiased estimate in linear time
statistic=mmd.compute_statistic()
print "MMD_l[X,Y]^2=%.2f" % statistic
# note: due to the streaming nature, successive calls of compute statistic use different data
# and produce different results. Data cannot be stored in memory
for _ in range(5):
print "MMD_l[X,Y]^2=%.2f" % mmd.compute_statistic()
# data source
gen_p=sg.GaussianBlobsDataGenerator(num_blobs, distance, 1, 0)
gen_q=sg.GaussianBlobsDataGenerator(num_blobs, distance, stretch, angle)
num_samples=100
print "Number of data is %d" % num_samples
# retreive some points, store them as non-streaming data in memory
data_p=gen_p.get_streamed_features(num_samples)
data_q=gen_q.get_streamed_features(num_samples)
# example to create mmd (note that num_samples can be maximum the number of data in memory)
mmd=sg.LinearTimeMMD(data_p, data_q)
mmd.set_kernel(sg.GaussianKernel(10, 1))
mmd.set_num_blocks_per_burst(100)
print "Linear time MMD statistic: %.2f" % mmd.compute_statistic()
mmd=sg.LinearTimeMMD(gen_p, gen_q)
mmd.set_kernel(kernel)
mmd.set_num_samples_p(m)
mmd.set_num_samples_q(m)
mmd.set_num_blocks_per_burst(block_size)
print "m=%d samples from p and q" % m
print "Binary test result is: " + ("Rejection" if mmd.perform_test(alpha) else "No rejection")
print "P-value test result is %.2f" % mmd.compute_p_value(mmd.compute_statistic())
# mmd instance using streaming features
mmd=sg.LinearTimeMMD(gen_p, gen_q)
mmd.set_num_samples_p(m)
mmd.set_num_samples_q(m)
mmd.set_num_blocks_per_burst(block_size)
sigmas=[2**x for x in np.linspace(-5, 5, 11)]
print "Choosing kernel width from", ["{0:.2f}".format(sigma) for sigma in sigmas]
for i in range(len(sigmas)):
mmd.add_kernel(sg.GaussianKernel(10, sigmas[i]))
# optmal kernel choice is possible for linear time MMD
mmd.set_kernel_selection_strategy(sg.KSM_MAXIMIZE_POWER)
# must be set true for kernel selection
mmd.set_train_test_mode(True)
# select best kernel
mmd.select_kernel()
best_kernel=mmd.get_kernel()
best_kernel=sg.GaussianKernel.obtain_from_generic(best_kernel)
print "Best single kernel has bandwidth %.2f" % best_kernel.get_width()
mmd.set_null_approximation_method(sg.NAM_MMD1_GAUSSIAN);
p_value_best=mmd.compute_p_value(mmd.compute_statistic());
print "Bootstrapping: P-value of MMD test with optimal kernel is %.2f" % p_value_best
m=5000
mmd.set_num_samples_p(m)
mmd.set_num_samples_q(m)
mmd.set_train_test_mode(False)
num_samples=500
# sample null and alternative distribution, implicitly generate new data for that
mmd.set_null_approximation_method(sg.NAM_PERMUTATION)
mmd.set_num_null_samples(num_samples)
null_samples=mmd.sample_null()
alt_samples=zeros(num_samples)
for i in range(num_samples):
alt_samples[i]=mmd.compute_statistic()
plot_alt_vs_null(alt_samples, null_samples, alpha)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some Formal Basics (skip if you just want code examples)
Step2: Now how to compare these two sets of samples? Clearly, a t-test would be a bad idea since it basically compares mean and variance of $X$ and $Y$. But we set that to be equal. By chance, the estimates of these statistics might differ, but that is unlikely to be significant. Thus, we have to look at higher order statistics of the samples. In fact, kernel two-sample tests look at all (infinitely many) higher order moments.
Step3: Quadratic Time MMD
Step4: Any sub-class of <a href="http
Step5: Now let us visualise distribution of MMD statistic under $H_0
Step6: Null and Alternative Distribution Illustrated
Step7: Different Ways to Approximate the Null Distribution for the Quadratic Time MMD
Step8: The above plot of the Eigenspectrum shows that the Eigenvalues are decaying extremely fast. We choose the number for the approximation such that all Eigenvalues bigger than some threshold are used. In this case, we will not loose a lot of accuracy while gaining a significant speedup. For slower decaying Eigenspectrums, this approximation might be more expensive.
Step9: The Gamma Moment Matching Approximation and Type I errors
Step10: As we can see, the above example was kind of unfortunate, as the approximation fails badly. We check the type I error to verify that. This works similar to sampling the alternative distribution
Step11: We see that Gamma basically never rejects, which is inline with the fact that the p-value was massively overestimated above. Note that for the other tests, the p-value is also not at its desired value, but this is due to the low number of samples/repetitions in the above code. Increasing them leads to consistent type I errors.
Step12: We now describe the linear time MMD, as described in [1, Section 6], which is implemented in Shogun. A fast, unbiased estimate for the original MMD expression which still uses all available data can be obtained by dividing the data into two parts and then computing $\text{MMD}_l^2[\mathcal{F},X,Y]=\frac{1}{m_2}\sum_{i=1}^{m_2}h(z_{2i},z_{2i+1})$, where $h(z_{2i},z_{2i+1})=k(x_{2i},x_{2i+1})+k(y_{2i},y_{2i+1})-k(x_{2i},y_{2i+1})-k(x_{2i+1},y_{2i})$ and $m_2=\lfloor\frac{m}{2}\rfloor$ [1, Lemma 14]; a NumPy sketch of this estimator appears after this step list.
Step13: Sometimes, one might want to use <a href="http
Step14: The Gaussian Approximation to the Null Distribution
Step15: Kernel Selection for the MMD -- Overview
Step16: Now perform two-sample test with that kernel
Step17: For the linear time MMD, the null and alternative distributions look different than for the quadratic time MMD as plotted above. Let's sample them (takes longer, reduce number of samples a bit). Note how we can tell the linear time MMD to simulate the null hypothesis, which is necessary since we cannot permute by hand as samples are not in memory.
Step18: And visualise again. Note that both null and alternative distribution are Gaussian, which allows the fast null distribution approximation and the optimal kernel selection
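The following is a minimal NumPy sketch (my addition, not part of the notebook) of the Step12 estimator, assuming a Gaussian kernel $k(x,x')=\exp(-(x-x')^2/(2\sigma^2))$ and one-dimensional samples:
import numpy as np
def linear_time_mmd(X, Y, sigma=1.0):
    m2 = min(len(X), len(Y)) // 2
    x1, x2 = X[0:2*m2:2], X[1:2*m2:2]  # x_{2i}, x_{2i+1}
    y1, y2 = Y[0:2*m2:2], Y[1:2*m2:2]  # y_{2i}, y_{2i+1}
    k = lambda a, b: np.exp(-(a - b)**2 / (2.0 * sigma**2))
    h = k(x1, x2) + k(y1, y2) - k(x1, y2) - k(x2, y1)
    return h.mean()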
|
4,395
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
from numpy import pi
%matplotlib inline
from pyqg import sqg_model
# create the model object
year = 1.
m = sqg_model.SQGModel(L=2.*pi,nx=512, tmax = 26.005,
beta = 0., Nb = 1., H = 1., rek = 0., rd = None, dt = 0.005,
taveint=1, twrite=400, ntd=4)
# in this example we used ntd=4, four threads
# if your machine has more (or fewer) cores available, you could try changing it
# Choose ICs from Held et al. (1995)
# case i) Elliptical vortex
x = np.linspace(m.dx/2,2*np.pi,m.nx) - np.pi
y = np.linspace(m.dy/2,2*np.pi,m.ny) - np.pi
x,y = np.meshgrid(x,y)
qi = -np.exp(-(x**2 + (4.0*y)**2)/(m.L/6.0)**2)
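# the factor 4.0 on y squeezes the Gaussian in y, giving an elliptical (4:1, x:y) vortex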
# initialize the model with that initial condition
m.set_q(qi[np.newaxis,:,:])
# Plot the ICs
plt.rcParams['image.cmap'] = 'RdBu'
plt.clf()
p1 = plt.imshow(m.q.squeeze() + m.beta * m.y)
plt.title('b(x,y,t=0)')
plt.colorbar()
plt.clim([-1, 0])
plt.xticks([])
plt.yticks([])
plt.show()
for snapshot in m.run_with_snapshots(tsnapstart=0., tsnapint=400*m.dt):
plt.clf()
p1 = plt.imshow(m.q.squeeze() + m.beta * m.y)
#plt.clim([-30., 30.])
plt.title('t='+str(m.t))
plt.colorbar()
plt.clim([-1, 0])
plt.xticks([])
plt.yticks([])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Surface quasi-geostrophy (SQG) is a relatively simple model that describes surface intensified flows due to buoyancy. One of its advantages is that it only has two spatial dimensions but describes a three-dimensional solution.
Step2: Initial condition
Step3: Running the model
|
4,396
|
<ASSISTANT_TASK:>
Python Code:
# NBVAL_IGNORE_OUTPUT
import numpy as np
import matplotlib.pyplot as plot
import math as mt
import matplotlib.ticker as mticker
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import cm
# NBVAL_IGNORE_OUTPUT
%matplotlib inline
from examples.seismic import TimeAxis
from examples.seismic import RickerSource
from examples.seismic import Receiver
from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator
nptx = 101
nptz = 101
x0 = 0.
x1 = 1000.
compx = x1-x0
z0 = 0.
z1 = 1000.
compz = z1-z0;
hxv = (x1-x0)/(nptx-1)
hzv = (z1-z0)/(nptz-1)
npmlx = 20
npmlz = 20
lx = npmlx*hxv
lz = npmlz*hzv
nptx = nptx + 2*npmlx
nptz = nptz + 1*npmlz
x0 = x0 - hxv*npmlx
x1 = x1 + hxv*npmlx
compx = x1-x0
z0 = z0
z1 = z1 + hzv*npmlz
compz = z1-z0
origin = (x0,z0)
extent = (compx,compz)
shape = (nptx,nptz)
spacing = (hxv,hzv)
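# The grid is split into four subdomains: d0 is the physical (white) interior;
# d1 (left strip), d2 (right strip) and d3 (bottom strip) form the absorbing
# (blue) layer. No absorbing layer is placed at the surface z = z0.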
class d0domain(SubDomain):
name = 'd0'
def define(self, dimensions):
x, z = dimensions
return {x: ('middle', npmlx, npmlx), z: ('middle', 0, npmlz)}
d0_domain = d0domain()
class d1domain(SubDomain):
name = 'd1'
def define(self, dimensions):
x, z = dimensions
return {x: ('left',npmlx), z: z}
d1_domain = d1domain()
class d2domain(SubDomain):
name = 'd2'
def define(self, dimensions):
x, z = dimensions
return {x: ('right',npmlx), z: z}
d2_domain = d2domain()
class d3domain(SubDomain):
name = 'd3'
def define(self, dimensions):
x, z = dimensions
return {x: ('middle', npmlx, npmlx), z: ('right',npmlz)}
d3_domain = d3domain()
grid = Grid(origin=origin, extent=extent, shape=shape, subdomains=(d0_domain,d1_domain,d2_domain,d3_domain), dtype=np.float64)
v0 = np.zeros((nptx,nptz))
v1 = np.zeros((nptx-1,nptz-1))
X0 = np.linspace(x0,x1,nptx)
Z0 = np.linspace(z0,z1,nptz)
x10 = x0+lx
x11 = x1-lx
z10 = z0
z11 = z1 - lz
xm = 0.5*(x10+x11)
zm = 0.5*(z10+z11)
pxm = 0
pzm = 0
for i in range(0,nptx):
if(X0[i]==xm): pxm = i
for j in range(0,nptz):
if(Z0[j]==zm): pzm = j
p0 = 0
p1 = pzm
p2 = nptz
v0[0:nptx,p0:p1] = 1.5
v0[0:nptx,p1:p2] = 2.5
p0 = 0
p1 = pzm
p2 = nptz-1
v1[0:nptx-1,p0:p1] = 1.5
v1[0:nptx-1,p1:p2] = 2.5
def graph2dvel(vel):
plot.figure()
plot.figure(figsize=(16,8))
fscale = 1/10**(3)
scale = np.amax(vel[npmlx:-npmlx,0:-npmlz])
extent = [fscale*(x0+lx),fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]
fig = plot.imshow(np.transpose(vel[npmlx:-npmlx,0:-npmlz]), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)
plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.title('Velocity Profile')
plot.grid()
ax = plot.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plot.colorbar(fig, cax=cax, format='%.2e')
cbar.set_label('Velocity [km/s]')
plot.show()
# NBVAL_IGNORE_OUTPUT
graph2dvel(v0)
t0 = 0.
tn = 1000.
CFL = 0.4
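# explicit-scheme stability: dt must satisfy dt <= CFL*min(hxv,hzv)/vmax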
vmax = np.amax(v0)
dtmax = np.float64((min(hxv,hzv)*CFL)/(vmax))
ntmax = int((tn-t0)/dtmax)+1
dt0 = np.float64((tn-t0)/ntmax)
time_range = TimeAxis(start=t0,stop=tn,num=ntmax+1)
nt = time_range.num - 1
(hx,hz) = grid.spacing_map
(x, z) = grid.dimensions
t = grid.stepping_dim
dt = grid.stepping_dim.spacing
f0 = 0.01
nsource = 1
xposf = 0.5*(compx-2*npmlx*hxv)
zposf = hzv
src = RickerSource(name='src',grid=grid,f0=f0,npoint=nsource,time_range=time_range,staggered=NODE,dtype=np.float64)
src.coordinates.data[:, 0] = xposf
src.coordinates.data[:, 1] = zposf
# NBVAL_IGNORE_OUTPUT
src.show()
nrec = nptx
nxpos = np.linspace(x0,x1,nrec)
nzpos = hzv
rec = Receiver(name='rec',grid=grid,npoint=nrec,time_range=time_range,staggered=NODE,dtype=np.float64)
rec.coordinates.data[:, 0] = nxpos
rec.coordinates.data[:, 1] = nzpos
u = TimeFunction(name="u",grid=grid,time_order=2,space_order=2,staggered=NODE,dtype=np.float64)
phi1 = TimeFunction(name="phi1",grid=grid,time_order=2,space_order=2,staggered=(x,z),dtype=np.float64)
phi2 = TimeFunction(name="phi2",grid=grid,time_order=2,space_order=2,staggered=(x,z),dtype=np.float64)
vel0 = Function(name="vel0",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)
vel0.data[:,:] = v0[:,:]
vel1 = Function(name="vel1", grid=grid,space_order=2,staggered=(x,z),dtype=np.float64)
vel1.data[0:nptx-1,0:nptz-1] = v1
vel1.data[nptx-1,0:nptz-1] = vel1.data[nptx-2,0:nptz-1]
vel1.data[0:nptx,nptz-1] = vel1.data[0:nptx,nptz-2]
src_term = src.inject(field=u.forward,expr=src*dt**2*vel0**2)
rec_term = rec.interpolate(expr=u)
x0pml = x0 + npmlx*hxv
x1pml = x1 - npmlx*hxv
z0pml = z0
z1pml = z1 - npmlz*hzv
def fdamp(x,z,i):
quibar = 0.05
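# smooth ramp: zeta(a) = quibar*(a - sin(2*pi*a)/(2*pi)), with a the normalized distance into the layer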
if(i==1):
a = np.where(x<=x0pml,(np.abs(x-x0pml)/lx),np.where(x>=x1pml,(np.abs(x-x1pml)/lx),0.))
fdamp = quibar*(a-(1./(2.*np.pi))*np.sin(2.*np.pi*a))
if(i==2):
a = np.where(z<=z0pml,(np.abs(z-z0pml)/lz),np.where(z>=z1pml,(np.abs(z-z1pml)/lz),0.))
fdamp = quibar*(a-(1./(2.*np.pi))*np.sin(2.*np.pi*a))
return fdamp
def generatemdamp():
X0 = np.linspace(x0,x1,nptx)
Z0 = np.linspace(z0,z1,nptz)
X0grid,Z0grid = np.meshgrid(X0,Z0)
X1 = np.linspace((x0+0.5*hxv),(x1-0.5*hxv),nptx-1)
Z1 = np.linspace((z0+0.5*hzv),(z1-0.5*hzv),nptz-1)
X1grid,Z1grid = np.meshgrid(X1,Z1)
D01 = np.zeros((nptx,nptz))
D02 = np.zeros((nptx,nptz))
D11 = np.zeros((nptx,nptz))
D12 = np.zeros((nptx,nptz))
D01 = np.transpose(fdamp(X0grid,Z0grid,1))
D02 = np.transpose(fdamp(X0grid,Z0grid,2))
D11 = np.transpose(fdamp(X1grid,Z1grid,1))
D12 = np.transpose(fdamp(X1grid,Z1grid,2))
return D01, D02, D11, D12
D01, D02, D11, D12 = generatemdamp();
def graph2damp(D):
plot.figure()
plot.figure(figsize=(16,8))
fscale = 10**(-3)
scale = np.amax(D)
extent = [fscale*x0,fscale*x1, fscale*z1, fscale*z0]
fig = plot.imshow(np.transpose(D), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)
plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.title('Absorbing Layer Function')
plot.grid()
ax = plot.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plot.colorbar(fig, cax=cax, format='%.2e')
cbar.set_label('Damping')
plot.show()
# NBVAL_IGNORE_OUTPUT
graph2damp(D01)
# NBVAL_IGNORE_OUTPUT
graph2damp(D02)
dampx0 = Function(name="dampx0", grid=grid,space_order=2,staggered=NODE ,dtype=np.float64)
dampz0 = Function(name="dampz0", grid=grid,space_order=2,staggered=NODE ,dtype=np.float64)
dampx0.data[:,:] = D01
dampz0.data[:,:] = D02
dampx1 = Function(name="dampx1", grid=grid,space_order=2,staggered=(x,z),dtype=np.float64)
dampz1 = Function(name="dampz1", grid=grid,space_order=2,staggered=(x,z),dtype=np.float64)
dampx1.data[0:nptx-1,0:nptz-1] = D11
dampz1.data[0:nptx-1,0:nptz-1] = D12
dampx1.data[nptx-1,0:nptz-1] = dampx1.data[nptx-2,0:nptz-1]
dampx1.data[0:nptx,nptz-1] = dampx1.data[0:nptx,nptz-2]
dampz1.data[nptx-1,0:nptz-1] = dampz1.data[nptx-2,0:nptz-1]
dampz1.data[0:nptx,nptz-1] = dampz1.data[0:nptx,nptz-2]
# White Region
pde01 = Eq(u.dt2-u.laplace*vel0**2)
# Blue Region
pde02a = u.dt2 + (dampx0+dampz0)*u.dtc + (dampx0*dampz0)*u - u.laplace*vel0*vel0
pde02b = - (0.5/hx)*(phi1[t,x,z-1]+phi1[t,x,z]-phi1[t,x-1,z-1]-phi1[t,x-1,z])
pde02c = - (0.5/hz)*(phi2[t,x-1,z]+phi2[t,x,z]-phi2[t,x-1,z-1]-phi2[t,x,z-1])
pde02 = Eq(pde02a + pde02b + pde02c)
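# Auxiliary PML fields: phi1 evolves with damping dampx1 and is forced by
# (dampz1-dampx1)*vel1**2 times a time- and cell-averaged x-derivative of u;
# phi2 is the analogous z-direction field.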
pde10 = phi1.dt + dampx1*0.5*(phi1.forward+phi1)
a1 = u[t+1,x+1,z] + u[t+1,x+1,z+1] - u[t+1,x,z] - u[t+1,x,z+1]
a2 = u[t,x+1,z] + u[t,x+1,z+1] - u[t,x,z] - u[t,x,z+1]
pde11 = -(dampz1-dampx1)*0.5*(0.5/hx)*(a1+a2)*vel1**2
pde1 = Eq(pde10+pde11)
pde20 = phi2.dt + dampz1*0.5*(phi2.forward+phi2)
b1 = u[t+1,x,z+1] + u[t+1,x+1,z+1] - u[t+1,x,z] - u[t+1,x+1,z]
b2 = u[t,x,z+1] + u[t,x+1,z+1] - u[t,x,z] - u[t,x+1,z]
pde21 = -(dampx1-dampz1)*0.5*(0.5/hz)*(b1+b2)*vel1**2
pde2 = Eq(pde20+pde21)
stencil01 = Eq(u.forward,solve(pde01,u.forward) ,subdomain = grid.subdomains['d0'])
subds = ['d1','d2','d3']
stencil02 = [Eq(u.forward,solve(pde02, u.forward),subdomain = grid.subdomains[subds[i]]) for i in range(0,len(subds))]
stencil1 = [Eq(phi1.forward, solve(pde1,phi1.forward),subdomain = grid.subdomains[subds[i]]) for i in range(0,len(subds))]
stencil2 = [Eq(phi2.forward, solve(pde2,phi2.forward),subdomain = grid.subdomains[subds[i]]) for i in range(0,len(subds))]
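# boundary conditions: u = 0 on the left/right edges and at the bottom; zero-gradient (free surface) at z = z0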
bc = [Eq(u[t+1,0,z],0.),Eq(u[t+1,nptx-1,z],0.),Eq(u[t+1,x,nptz-1],0.),Eq(u[t+1,x,0],u[t+1,x,1])]
# NBVAL_IGNORE_OUTPUT
op = Operator([stencil01,stencil02] + src_term + bc + [stencil1,stencil2] + rec_term,subs=grid.spacing_map)
u.data[:] = 0.
phi1.data[:] = 0.
phi2.data[:] = 0.
# NBVAL_IGNORE_OUTPUT
op(time=nt,dt=dt0)
def graph2d(U):
plot.figure()
plot.figure(figsize=(16,8))
fscale = 1/10**(3)
scale = np.amax(U[npmlx:-npmlx,0:-npmlz])/10.
extent = [fscale*x0pml,fscale*x1pml,fscale*z1pml,fscale*z0pml]
fig = plot.imshow(np.transpose(U[npmlx:-npmlx,0:-npmlz]),vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)
plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.axis('equal')
plot.title('Map - Acoustic Problem PML Devito')
plot.grid()
ax = plot.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plot.colorbar(fig, cax=cax, format='%.2e')
cbar.set_label('Displacement [km]')
plot.draw()
plot.show()
# NBVAL_IGNORE_OUTPUT
graph2d(u.data[0,:,:])
def graph2drec(rec):
plot.figure()
plot.figure(figsize=(16,8))
fscaled = 1/10**(3)
fscalet = 1/10**(3)
scale = np.amax(rec[:,npmlx:-npmlx])/10.
extent = [fscaled*x0pml,fscaled*x1pml, fscalet*tn, fscalet*t0]
fig = plot.imshow(rec[:,npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)
plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))
plot.axis('equal')
plot.title('Receivers Signal Profile with PML - Devito')
ax = plot.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plot.colorbar(fig, cax=cax, format='%.2e')
plot.show()
# NBVAL_IGNORE_OUTPUT
graph2drec(rec.data)
assert np.isclose(np.linalg.norm(rec.data), 990, rtol=1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From Devito's library of examples we import the following structures
Step2: The mesh parameters that we choose define the domain $\Omega_{0}$ plus the absorption region. For this, we use the following data
Step3: The number of points of the absorption layer in the directions $x$ and $z$ are given, respectively, by
Step4: The lengths $L_{x}$ and $L_{z}$ are given, respectively, by
Step5: We define the grid
Step6: As in the case of the Damping acoustic equation, we can here split the computations in the two subdomains
Step7: The blue region is the union of 3 subdomains
Step8: The figure below represents the division of domains that we did previously
Step9: The velocity field is needed in both staggered and non-staggered grids. As before, we read the file and interpolate it to the non-staggered grid. From these values we interpolate to the staggered grid.
Step10: Previously we introduced the local variables x10,x11,z10,z11,xm,zm,pxm and pzm that help us to create a specific velocity field, where we consider the whole domain (including the absorption region). Below we include a routine to plot the velocity field.
Step11: Below we include the plot of the velocity field.
Step12: We then define the temporal properties
Step13: The symbolic values associated with the spatial and temporal grids that are used in the composition of the equations are given by
Step14: We set the parameters for the Ricker source
Step15: Below we include the plot of the Ricker source.
Step16: For the receivers
Step17: The displacement field u is allocated
Step18: The auxiliary functions $\phi_{1}(x,z,t)$ and $\phi_{2}(x,z,t)$ will be two fields of second order in time and space, which use points of type staggered.
Step19: We set the velocity on the non-staggered grid
Step20: and on the staggered one. Notice that the field has one less point in each direction.
Step21: Since the array v1 has one fewer point than the field vel1 in each direction, we fill row nptx-1 of vel1 with the values from row nptx-2, and column nptz-1 with the values from column nptz-2, of the v1 data. This copy does not alter the properties of the vel1 velocity field, given its piecewise-constant profile. Copying the information is done by the following sequence of commands
Step22: We set the source term and receivers
Step23: The next step is to create the structures that reproduce the functions $\zeta_{1}(x,z)$ and $\zeta_{2}(x,z)$ and then assign these functions to fields in non-staggered and staggered grids.
Step24: Having set the boundaries of $\Omega$, we create a function fdamp, which represents $\zeta_{1}(x,z)$ (when $i=1$) and $\zeta_{2}(x,z)$ (when $i=2$); a standalone 1-D sketch of this damping ramp appears after the step list.
Step25: We created the damping function that represents $\zeta_{1}(x,z)$ and $\zeta_{2}(x,z)$. We now define arrays with the damping function values on grid points (staggered and non-staggered)
Step26: Below we include a routine to plot the damping fields.
Step27: Below we include the plot of damping field in $x$ direction.
Step28: Below we include the plot of damping field in $z$ direction.
Step29: As pointed out previously, the functions $\zeta_{1}(x,z)$ and $\zeta_{2}(x,z)$ define damping in the directions $x$ and $z$ respectively. They will be identified with the symbolic names of dampx and dampz, respectively.
Step30: In terms of dimensions, the arrays D11 and D12 have dimension $(nptx-1)\times (nptz-1)$. Since our grid has $nptx\times nptz$ points, we complete the row nptx-1 with information from the row nptx-2 and the column nptz-1 with information from the column nptz-2, in the fields dampx1 and dampz1, using the arrays D11 and D12, respectively.
Step31: As we saw previously, the acoustic equation with PML has the formulations
Step32: Now we define the stencils for each of the pdes that we created previously. The pde01 is defined on subdomain d0.
Step33: The pdes
Step34: The boundary conditions are set
Step35: We then define the operator (op) that will join the acoustic equation, source term, boundary conditions and receivers.
Step36: So that there are no residuals in the variables of interest, we reset the fields u, phi1 and phi2 as follows
Step37: We assign to op the number of time steps it must execute and the size of the time step in the local variables time and dt, respectively. This assignment is done as in <a href="01_introduction.ipynb">Introduction to Acoustic Problem</a>, where we have the following assignment structure
Step38: We view the result of the displacement field at the end time using the graph2d routine given by
Step39: Notice that the solution obtained here shows reduced noise compared with the results displayed in the notebook <a href="01_introduction.ipynb">Introduction to Acoustic Problem</a>. We plot the result of the Receivers using the graph2drec routine.
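As an aside to Step24, a standalone sketch (independent of the notebook's grid; quibar matches the value used in fdamp) of the 1-D damping ramp:

import numpy as np
import matplotlib.pyplot as plt

a = np.linspace(0.0, 1.0, 200)   # normalized distance into the absorbing layer
quibar = 0.05                    # same amplitude as in fdamp
zeta = quibar * (a - np.sin(2.0 * np.pi * a) / (2.0 * np.pi))
plt.plot(a, zeta)
plt.xlabel('normalized depth a')
plt.ylabel('zeta(a)')
plt.title('PML damping ramp: smooth growth from 0 to quibar')
plt.show()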
|
4,397
|
<ASSISTANT_TASK:>
Python Code:
from dkrz_forms import form_widgets
form_widgets.show_status('form-generation')
from dkrz_forms import form_widgets
form_widgets.create_form()
from dkrz_forms import form_widgets
form_widgets.show_status('form-retrieval')
MY_LAST_NAME = "testsuite" # e.gl MY_LAST_NAME = "schulz"
#-------------------------------------------------
from dkrz_forms import form_handler, form_widgets
form_info = form_widgets.check_and_retrieve(MY_LAST_NAME)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a data form
Step2: Retrieve your DKRZ data form
Step3: Please provide your last name
|
4,398
|
<ASSISTANT_TASK:>
Python Code:
def number_to_words(n):
"""Given a number n between 1-1000 inclusive, return the words for the number as a string."""
_1={1:' ',2:"-"}
_9={1:"one",2:"two",3:"three",4:"four",5:'five',6:'six',7:'seven',8:"eight",9:'nine',0:'',11:'and'}
_100={0:'',1:'ten',2:"twenty",3:'thirty',4:'forty',5:'fifty',6:'sixty',7:'seventy',8:'eighty',9:'ninety',10:'hundred'}
_19={1:'eleven',2:'twelve',3:'thirteen',4:'fourteen',5:'fifteen',6:'sixteen',7:'seventeen',8:'eighteen',9:'nineteen'}
#above creates all the needed combinations for up to 999 and below makes 1000
str_n=str(n)
x=int(str_n[0])
if n==1000:
first,second,third='one thousand','',''
elif len(str_n)==3: # takes care of all numbers from 100-999
if int(str_n[1:3])>10 and int(str_n[1:3])<20: # writes for x11-x19
z=int(str_n[2])
first,second,third=_9[x]+_1[1]+_100[10]+_1[1]+_9[11],' ',_19[z]
elif int(str_n[2])==0 and int(str_n[1])==0: #writes for 100,200,300,...
first,second,third=_9[x]+_1[1]+_100[10],'',''
else:
y,z=int(str_n[1]),int(str_n[2])#writes for all other numbers of length 3
first,second,third=_9[x]+_1[1]+_100[10]+_1[1]+_9[11]+_1[1],_100[y],_9[z]
elif len(str_n)==2: #takes care of 10-99
if n>10 and n<20: # writes for 11-19
y=int(str_n[1])
first,second,third='','',_19[y]
else: #writes everything else
y=int(str_n[1])
first,second,third='',_100[x],_9[y]
else: #writes for 0-9
first,second,third='','',_9[x]
answer=first+second+third
return answer
assert number_to_words(333)=="three hundred and thirtythree"
assert number_to_words(21)=='twentyone'
assert number_to_words(20)=='twenty'
assert number_to_words(19)=='nineteen'
assert number_to_words(219)=='two hundred and nineteen'
assert number_to_words(1)=='one'
assert number_to_words(1000)=='one thousand'
assert number_to_words(101)=='one hundred and one'
assert number_to_words(180)=='one hundred and eighty'
assert number_to_words(100)=='one hundred'
assert True # use this for grading the number_to_words tests.
def count_letters(n):
"""Count the number of letters used to write out the words for 1-n inclusive."""
numbers=[i for i in range(1,n+1)]
l=[number_to_words(i) for i in numbers]
f=''.join(l)
f1=f.replace(' ','')
f2=f1.replace('-','')
return len(f2)
assert count_letters(1)==3
assert count_letters(5)==19
assert count_letters(10)==39
assert count_letters(115)-count_letters(114)==20
assert True # use this for grading the count_letters tests.
count_letters(1000)
assert True # use this for grading the answer to the original question.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Project Euler
Step2: Now write a set of assert tests for your number_to_words function that verifies that it is working as expected.
Step4: Now define a count_letters(n) that returns the number of letters used to write out the words for all of the numbers 1 to n inclusive (an equivalent generator-based sketch appears after this step list).
Step5: Now write a set of assert tests for your count_letters function that verifies that it is working as expected.
Step6: Finally use your count_letters function to solve the original question.
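As a small aside, count_letters can be written equivalently with a generator expression, avoiding the intermediate list; this sketch assumes number_to_words as defined above:

def count_letters_gen(n):
    # Same count as count_letters, computed lazily without building a list.
    return sum(len(number_to_words(i).replace(' ', '').replace('-', ''))
               for i in range(1, n + 1))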
|
4,399
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'csiro-bom', 'access-1-0', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
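# Illustrative example only (hypothetical value):
# DOC.set_value("Free-text overview of the atmosphere component of the model.")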
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
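# EXAMPLE (illustrative only): boolean properties take the unquoted Python
# literals True or False, matching the valid choices listed above.
#     DOC.set_value(True)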
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Fluorinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Fluorinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Representation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|