Unnamed: 0
int64 0
15.9k
| cleaned_code
stringlengths 67
124k
⌀ | cleaned_prompt
stringlengths 168
30.3k
⌀ |
|---|---|---|
7,100
|
<ASSISTANT_TASK:>
Python Code:
import os
import re
import psycopg2
import getpass
from collections import OrderedDict
# database config
sqluser=getpass.getuser()
# keep sqlpass blank if using peer authentication
sqlpass=''
# database
sqldb='mimic'
sqlschema='public,mimiciii'
query_schema = 'set search_path to ' + sqlschema + ';'
# Use password authentication only when a password was actually provided;
# an empty sqlpass means peer authentication (see the comment above sqlpass).
# BUGFIX: the original condition `(not sqlpass) & (sqlpass != '')` was always
# False (its two operands can never both be true), so a configured password
# was never sent to the server.
if sqlpass:
    con = psycopg2.connect(user=sqluser, password=sqlpass, database=sqldb)
else:
    con = psycopg2.connect(user=sqluser, database=sqldb)
print('Connected!')
# function to read a single script
def read_script(base_path, script_name):
    """Read a SQL script from disk and return its text as a single string.

    Blank-ish lines are handled specially and ``--`` line comments are
    stripped.  The newline at the end of a commented line is preserved so
    that the statement on the following line is not glued onto it.

    Args:
        base_path: directory containing the script.
        script_name: file name, possibly with a relative sub-path.

    Returns:
        The cleaned query text.
    """
    query = ''
    with open(os.path.join(base_path, script_name)) as f:
        for line in f:
            line = line.lstrip(' ').rstrip(' ')
            if len(line) < 1:
                continue
            elif len(line) < 2:
                # a bare newline — keep it so statement spacing survives
                query += line
            else:
                if '--' in line:
                    # BUGFIX: keep a trailing newline when stripping the
                    # comment; the original dropped it, merging this line
                    # with the next one (e.g. "select a" + "from t" ->
                    # "select afrom t").
                    line = line[0:line.index('--')].rstrip() + '\n'
                query += line
    # collapse double newlines left behind by removed comment-only lines
    query = query.replace('\n\n', '\n')
    return query
def extract_drop_line(query):
    """Split *query* into CREATE fragments and their DROP statements.

    Finds ``DROP MATERIALIZED VIEW``/``DROP TABLE`` lines, removes them
    from the query body, and pairs each remaining fragment with its drop
    so callers can run the drops separately before benchmarking.

    Returns:
        (queries, drops): ``queries`` is a list of query fragments (one
        per created object) and ``drops`` the matching list of drop
        statements.  With no drop present, returns ``([query], [''])`` so
        both values stay indexable (the original returned a bare string,
        which broke every caller that indexed the result).
    """
    query_drop = []
    # BUGFIX: the original classes used [A-z], which also matches the
    # punctuation between 'Z' and 'a' ('[', '\', ']', '^', '`').
    if 'drop materialized view ' in query.lower():
        query_drop.extend(
            re.findall('drop materialized view [A-Za-z0-9_ ]+;\n', query, re.I))
    if 'drop table ' in query.lower():
        query_drop.extend(
            re.findall('drop table [A-Za-z0-9_ ]+;\n', query, re.I))
    if not query_drop:
        # no drop statement: keep the (list, list) contract
        query = [query]
        query_drop = ['']
    elif len(query_drop) == 1:
        query = [query.replace(query_drop[0], '')]
    else:
        # multiple drop/create statements: split the query at each drop
        query_parts = list()
        for q in query_drop:
            query_split = query.split(q)
            query_parts.append(query_split[0])
            query = query_split[1]
        # the text after the last drop is the final created object
        query_parts.append(query)
        # drop whatever preceded the first drop statement (usually empty)
        query = query_parts[1:]
    return query, query_drop
# benchmark query
def benchmark_query(con, query, query_schema=None, query_drop=None, parallel_workers=None):
    """Run ``EXPLAIN ANALYZE`` on *query* and return (time_ms, plan_lines).

    Args:
        con: an open DB-API connection (psycopg2 in this notebook).
        query: SQL text to benchmark.
        query_schema: optional ``set search_path ...`` statement run first.
            (BUGFIX: the original default referenced module globals; every
            call site passes it explicitly, so None is a safe default.)
        query_drop: optional DROP statement run before the query.  The
            original default was ``query_drop=query_drop``, referencing a
            global that does not exist at definition time — a NameError.
        parallel_workers: value for max_parallel_workers_per_gather, or
            None to reset it to the server default.  BUGFIX: the original
            tested truthiness, so ``parallel_workers=0`` (the single-core
            benchmark below) silently reset to DEFAULT instead of 0.

    Returns:
        (time, query_plan): execution time in milliseconds and the list
        of plan lines produced by EXPLAIN ANALYZE.
    """
    cur = con.cursor()
    if query_schema:
        cur.execute(query_schema)
    if parallel_workers is not None:
        cur.execute('SET max_parallel_workers_per_gather TO {};'.format(parallel_workers))
    else:
        cur.execute('SET max_parallel_workers_per_gather TO DEFAULT;')
    if query_drop:
        cur.execute(query_drop)
    cur.execute('explain analyze ' + query)
    result = cur.fetchall()
    cur.execute('commit;')
    cur.close()
    query_plan = [item[0] for item in result]
    # PostgreSQL >= 10 prints "Execution Time", older versions
    # "Execution time" — parse case-insensitively to support both.
    match = re.search(r'execution time:\s*([\d.]+)\s*ms', query_plan[-1], re.I)
    time = float(match.group(1))
    return time, query_plan
# example on a single concept
base_path = '/home/alistairewj/git/mimic-code/concepts'
script_name = 'demographics/icustay_detail.sql'
print(script_name, end='...')
# read the script's query
query = read_script(base_path, script_name)
# returns a list of queries/drop statements
query, query_drop = extract_drop_line(query)
if len(query)==1:
# most of the time each script only creates a single view/table
query = query[0]
query_drop = query_drop[0]
time, query_plan = benchmark_query(con, query, query_schema=query_schema, query_drop=query_drop)
print('{:6.1f}s'.format(time/1000))
else:
print('')
for i in range(len(query)):
time, query_plan = benchmark_query(con, query[i], query_schema=query_schema, query_drop=query_drop[i])
print(' part {} - {:6.1f}s'.format(i, time/1000))
query_plans = OrderedDict()
query_times = OrderedDict()
base_path = '/home/alistairewj/git/mimic-code/concepts'
# read through all make concepts
with open(os.path.join(base_path,'make-concepts.sql')) as fp:
for line in fp.readlines():
if len(line)<2:
continue
elif line[0:2] != '\\i':
continue
elif 'ccs_diagnosis_table.sql' in line:
continue
# get the name of the script
script_name = line[3:].rstrip('\n')
print('{:40s}'.format(script_name), end='... ')
# read the script's query
query = read_script(base_path, script_name)
query, query_drop = extract_drop_line(query)
if len(query)==1:
# most of the time each script only creates a single view/table
q = query[0]
qd = query_drop[0]
time, query_plan = benchmark_query(con, q, query_schema=query_schema, query_drop=qd)
print('{:6.1f}s'.format(time/1000))
else:
query_plans[script_name] = list()
query_times[script_name] = list()
for i in range(len(query)):
time, query_plan = benchmark_query(con, query[i], query_schema=query_schema, query_drop=query_drop[i])
print('')
print(' part {}...{:18s}{:6.1f}s'.format(i, '', time/1000))
query_plans[script_name].append(query_plan)
query_times[script_name].append(time)
# same thing, but test parallel
query_plans_single_core = OrderedDict()
query_times_single_core = OrderedDict()
parallel_workers = 0
base_path = '/home/alistairewj/git/mimic-code/concepts'
# read through all make concepts
with open(os.path.join(base_path,'make-concepts.sql')) as fp:
for line in fp.readlines():
if len(line)<2:
continue
elif line[0:2] != '\\i':
continue
elif 'ccs_diagnosis_table.sql' in line:
continue
# get the name of the script
script_name = line[3:].rstrip('\n')
print('{:40s}'.format(script_name), end='... ')
# read the script's query
query = read_script(base_path, script_name)
query, query_drop = extract_drop_line(query)
if len(query)==1:
# most of the time each script only creates a single view/table
q = query[0]
qd = query_drop[0]
time, query_plan = benchmark_query(con, q, query_schema=query_schema, query_drop=qd,
parallel_workers=0)
print('{:6.1f}s'.format(time/1000))
query_plans_single_core[script_name] = query_plan
query_times_single_core[script_name] = time
else:
query_plans_single_core[script_name] = list()
query_times_single_core[script_name] = list()
print('')
for i in range(len(query)):
time, query_plan = benchmark_query(con, query[i],
query_schema=query_schema, query_drop=query_drop[i],
parallel_workers=0)
print(' part {}...{:18s}{:6.1f}s'.format(i, '', time/1000))
query_plans_single_core[script_name].append(query_plan)
query_times_single_core[script_name].append(time)
# first print all queries which used a parallel plan
for q in query_plans:
for i, l in enumerate(query_plans[q]):
if 'Parallel' in l:
print(q)
break
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Benchmark all concepts in make-concepts.sql
Step2: Benchmark all concepts in make-concepts.sql
Step3: Compare parallel with no parallel
|
7,101
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import sys,os
ia898path = os.path.abspath('/etc/jupyterhub/ia898_1s2017/')
if ia898path not in sys.path:
sys.path.append(ia898path)
import ia898.src as ia
nb = ia.nbshow(2)
f = mpimg.imread('../data/cameraman.tif')
nb.nbshow(f,'Imagem original')
fsort = np.sort(f.ravel()).reshape(f.shape)
nb.nbshow(fsort, 'Imagem pixels ordenados')
nb.nbshow()
h = ia.histogram(f)
plt.plot(h),plt.title('Histograma de f');
n = f.size
T = 255./n * np.cumsum(h)
T = T.astype('uint8')
plt.plot(T),plt.title('Transformação de intensidade para equalizar');
nb = ia.nbshow(3)
nb.nbshow(f,'imagem original, média=%d' % (f.mean()))
g = T[f]
nb.nbshow(g, 'imagem equalizada, média=%d' % (g.mean()))
gsort = np.sort(g.ravel()).reshape(g.shape)
nb.nbshow(gsort, 'imagem equalizada ordenada')
nb.nbshow()
plt.figure(0)
hg = ia.histogram(g)
plt.plot(hg),plt.title('Histograma da imagem equalizada')
plt.figure(1)
hgc = np.cumsum(hg)
plt.plot(hgc),plt.title('Histograma acumulado da imagem equalizada');
f = mpimg.imread('../data/angiogr.tif')
f = np.clip(f,75,255)
ia.adshow(f)
h = ia.histogram(f)
plt.plot(h);
#print('info:',ia.iaimginfo(f))
n = f.size
T = 255./n * np.cumsum(h)
T = T.astype('uint8')
print('T:',T)
plt.plot(T),plt.title('Transformação de intensidade para equalizar')
g = T[f]
#print('info:', ia.iaimginfo(g))
ia.adshow(g, 'imagem equalizada')
gn = ia.normalize(g)
#print 'info:',ia.iaimginfo(gn)
ia.adshow(gn, 'imagem equalizada e normalizada')
hgn = ia.histogram(gn)
plt.plot(hgn),plt.title('histograma');
wiki=np.array([[52,55,61,66,70,61,64,73],
[63,59,55,90,109,85,69,72],
[62,59,68,113,144,104,66,73],
[63,58,71,122,154,106,70,69],
[67,61,68,104,126,88,68,70],
[79,65,60,70,77,68,58,75],
[85,71,64,59,55,61,65,83],
[87,79,69,68,65,76,78,94]])
print('wiki=\n',wiki)
h = ia.histogram(wiki)
n = wiki.size
T = 255./n * np.cumsum(h)
T = np.floor(T).astype('uint8')
g = T[wiki]
print('g=\n',g)
gn = ia.normalize(g)
print('gn=\n',gn)
# Manual min-max normalization of g onto [0, 255], to compare against the
# Wikipedia equalization example.
faux = g.ravel().astype('float')
minimum = faux.min()
maximum = faux.max()
lower = 0
upper = 255
gnn = np.round((faux - minimum) * (upper - lower) / (maximum - minimum) + lower, 0)
# BUGFIX: np.int was removed in NumPy 1.24 — the builtin int is the
# equivalent dtype alias.
gnn = gnn.reshape(g.shape).astype(int)
print('gnn=\n', gnn)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plotamos seu histograma e calculamos a transformação de contraste que equaliza o histograma baseado
Step2: A aplicação da transformação T em f
Step3: Finalmente, plotamos o histograma da imagem equalizada. Note o efeito mencionado acima em que o
Step4: Um problema da formulação simplificada acima é que no caso da imagem original não ter nenhum
Step5: Observe que a Transformação que equaliza a imagem, o seu primeiro valor
Step6: Para fazer com que o valor do menor pixel da imagem equalizada seja zero, temos duas opções básicas
Step7: Verificando a equação da wikipedia
Step8: Comparando-se o resultado (gn) com o valor da Wikipedia, percebemos que existe uma
|
7,102
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import antipackage
import github.ellisonbg.misc.vizarray as va
def checkerboard(size):
    """Return a size-by-size checkerboard of 0.0 and 1.0 as a NumPy array.

    The (0, 0) corner is 1.0 and values alternate along both axes.
    (BUGFIX: the docstring had lost its quotes, making the bare text a
    syntax error; also fixed the "checkboard" typo.)
    """
    board = np.zeros((size, size))
    board[::2, ::2] = 1.0
    board[1::2, 1::2] = 1.0
    return board
va.enable()
checkerboard(5)
a = checkerboard(4)
assert a[0,0]==1.0
assert a.sum()==8.0
assert a.dtype==np.dtype(float)
assert np.all(a[0,0:5:2]==1.0)
assert np.all(a[1,0:5:2]==0.0)
b = checkerboard(5)
assert b[0,0]==1.0
assert b.sum()==13.0
assert np.all(b.ravel()[0:26:2]==1.0)
assert np.all(b.ravel()[1:25:2]==0.0)
va.set_block_size(10)
checkerboard(20)
assert True
va.set_block_size(5)
checkerboard(27)
assert True
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Checkerboard
Step3: Use vizarray to visualize a checkerboard of size=20 with a block size of 10px.
Step4: Use vizarray to visualize a checkerboard of size=27 with a block size of 5px.
|
7,103
|
<ASSISTANT_TASK:>
Python Code:
import re
from pyknow import *
class Producto(Fact):
    """A product the customer has bought.

    >>> Producto(nombre="pepsi", tipo="refresco de cola", cantidad=1)
    """
    pass
class Cupon(Fact):
    """A coupon to generate for the customer's next purchase.

    >>> Cupon(tipo="2x1", producto="pepsi")
    """
    pass
class Promo(Fact):
    """A promotion currently running in the store; extra keys depend on type.

    >>> Promo(tipo="2x1", **depende_de_la_promo)
    """
    pass
class Beneficio(Fact):
    """The profit the store makes on each product.

    >>> Beneficio(nombre="pepsi", tipo="refresco de cola", ganancias=0.2)
    """
    pass
class OfertasNxM(KnowledgeEngine):
    """Generates NxM ("buy N, pay for M") coupons for purchased products."""

    @DefFacts()
    def carga_promociones_nxm(self):
        """Initial facts: the NxM promotions currently running."""
        yield Promo(tipo="2x1", producto="Dodot")
        yield Promo(tipo="2x1", producto="Leche Pascual")
        yield Promo(tipo="3x2", producto="Pilas AAA")

    @Rule(Promo(tipo=MATCH.t & P(lambda t: re.match(r"\d+x\d+", t)),
                producto=MATCH.p),
          Producto(nombre=MATCH.p))
    def oferta_nxm(self, t, p):
        """The customer bought a promoted product today, so we know they
        will come back to use the NxM promotion: emit the coupon."""
        self.declare(Cupon(tipo=t, producto=p))
watch('RULES', 'FACTS')
nxm = OfertasNxM()
nxm.reset()
nxm.declare(Producto(nombre="Dodot"))
nxm.declare(Producto(nombre="Agua Mineral"))
nxm.declare(Producto(nombre="Pilas AAA"))
nxm.run()
nxm.facts
class OfertasPACK(KnowledgeEngine):
    """Generates PACK coupons when the customer bought one half of a pack."""

    @DefFacts()
    def carga_promociones_pack(self):
        """Initial facts: the PACK promotions currently running."""
        yield Promo(tipo="PACK", producto1="Fregona ACME", producto2="Mopa ACME", descuento="25%")
        yield Promo(tipo="PACK", producto1="Pasta Gallo", producto2="Tomate Frito", descuento="10%")

    @Rule(Promo(tipo="PACK", producto1=MATCH.p1, producto2=MATCH.p2, descuento=MATCH.d),
          # exactly one of the two pack products was bought (XOR): buying
          # both would give the discount away without winning a new sale
          OR(
              AND(
                  NOT(Producto(nombre=MATCH.p1)),
                  Producto(nombre=MATCH.p2)
              ),
              AND(
                  Producto(nombre=MATCH.p1),
                  NOT(Producto(nombre=MATCH.p2))
              )
          )
    )
    def pack(self, p1, p2, d):
        """The customer will likely buy the missing pack product on their
        next visit: emit the PACK discount coupon."""
        self.declare(Cupon(tipo="PACK", producto1=p1, producto2=p2, descuento=d))
pack = OfertasPACK()
pack.reset()
pack.declare(Producto(nombre="Tomate Frito"))
pack.declare(Producto(nombre="Fregona ACME"))
pack.run()
pack.reset()
pack.declare(Producto(nombre="Fregona ACME"))
pack.declare(Producto(nombre="Mopa ACME"))
pack.run()
class OfertasDescuento(KnowledgeEngine):
    """Emits discount coupons steering customers to higher-margin products."""

    @DefFacts()
    def carga_beneficios(self):
        """Initial facts: the store's profit per product."""
        yield Beneficio(nombre="Mahou", tipo="Cerveza", ganancias=0.5)
        yield Beneficio(nombre="Cerveza Hacendado", tipo="Cerveza", ganancias=0.9)
        yield Beneficio(nombre="Pilas AAA Duracell", tipo="Pilas AAA", ganancias=1.5)
        yield Beneficio(nombre="Pilas AAA Hacendado", tipo="Pilas AAA", ganancias=2)

    @Rule(Producto(nombre=MATCH.p1),
          Beneficio(nombre=MATCH.p1, tipo=MATCH.t, ganancias=MATCH.g1),
          Beneficio(nombre=MATCH.p2, tipo=MATCH.t, ganancias=MATCH.g2),
          # only fire for an alternative with strictly higher profit
          TEST(lambda g1, g2: g2 > g1)
    )
    def descuento_producto_con_mayor_beneficio(self, p2, g1, g2, **_):
        """The bought product has a same-type alternative with a higher
        margin: offer a discount on the alternative, funded by half of the
        extra profit."""
        diferencia_ganancia = g2 - g1
        self.declare(Cupon(tipo="DESCUENTO",
                           producto=p2,
                           cantidad=diferencia_ganancia / 2))
descuento = OfertasDescuento()
descuento.reset()
descuento.declare(Producto(nombre="Mahou"))
descuento.run()
descuento.reset()
descuento.declare(Producto(nombre="Pilas AAA Hacendado"))
descuento.run()
class GeneradorCupones(OfertasNxM, OfertasPACK, OfertasDescuento):
    """Combines every offer engine into a single coupon generator."""

    def generar_cupones(self, *nombre_productos):
        """Yield the coupons triggered by the given purchased product names."""
        # Restart the engine so facts from a previous run do not leak in.
        self.reset()
        # Declare each product the customer has bought.
        for nombre in nombre_productos:
            self.declare(Producto(nombre=nombre))
        # Fire all matching rules.
        self.run()
        # Hand back only the generated coupons.
        for fact in self.facts.values():
            if isinstance(fact, Cupon):
                yield fact
ke = GeneradorCupones()
[cupon for cupon in ke.generar_cupones("Pilas AAA", "Mahou", "Tomate Frito")]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: Hechos
Step8: Objetivo 1
Step9: Pruebas
Step12: Objetivo 2
Step13: Pruebas
Step14: Si compramos ambos productos de un pack no se nos debe generar la promoción, ya que en este caso el comercio perdería beneficio.
Step17: Objetivo 3
Step18: Pruebas
Step19: El sistema no debe generar cupón si se ha comprado el producto con mayor beneficio
Step20: Juntándolo todo
|
7,104
|
<ASSISTANT_TASK:>
Python Code:
print(9876543)
name = 12345
number = 100000
print("ID before: " + str(id(number)))
number = 123456
print("ID after: " + str(id(number)))
number = 100000
print("ID before: " + str(id(number)))
number = number + 1
print("ID after: " + str(id(number)))
print(f"ID of number ({number}): " + str(id(number)))
number2 = 100001
print(f"ID of number2 ({number2}): " + str(id(number2)))
print("ID of number: " + str(id(number)))
number3 = number
print("ID of number2: " + str(id(number3)))
my_list = ['It\'s', 'never', 'enough']
print(f"id() of my_list ({my_list}) before:\n\t" + str(id(my_list)))
my_list[2] = 'lupus'
print(f"id() of my_list ({my_list}) after:\n\t" + str(id(my_list)))
str1 = "Puns are the highest form of literature."
str2 = str1
str2 = str2 + "\n\t - Alfred Hitchcock"
print(str1)
print('-' * len(str1))
print(str2)
list1 = [2, 8, 20, 28, 50, 82]
list2 = list1
list2.append(126)
print(list1)
print('-' * len(str(list1)))
print(list2)
print(id(list1))
print(id(list2))
list1 = [2, 8, 20, 28, 50, 82]
list2 = list1.copy()
list2.append(126)
print(list1)
print('-' * len(str(list1)))
print(list2)
def append_to_string(my_string):
    # Demo: strings are immutable, so rebinding my_string inside the
    # function creates a brand-new object (note the id() change below) and
    # the caller's variable is unaffected.
    print('\t--- Inside the function now ---')
    print(f'\tFunction got value: {my_string}, with id: {id(my_string)}.')
    my_string = my_string + 'Z'
    print(f'\tChanged my_string to be {my_string}, with id: {id(my_string)}.')
    print('\t--- Finished to run the function now ---')
s = 'Hello'
print(f'Before calling the function: s = {s}, with id: {id(s)}.')
append_to_string(s)
print(f'After calling the function: s = {s}, with id: {id(s)}.')
def append_to_list(my_list):
    # Demo: `my_list + [126]` builds a NEW list and rebinds the local name,
    # so the caller's list is NOT changed (compare the printed ids).
    # NOTE(review): the message text says "my_string" but prints my_list —
    # copy/paste slip in the demo output, left as-is.
    print('\t--- Inside the function now ---')
    print(f'\tFunction got value: {my_list}, with id: {id(my_list)}.')
    my_list = my_list + [126]
    print(f'\tChanged my_string to be {my_list}, with id: {id(my_list)}.')
    print('\t--- Finished to run the function now ---')
l = [2, 8, 20, 28, 50, 82]
print(f'Before calling the function: l = {l}, with id: {id(l)}.')
append_to_list(l)
print(f'After calling the function: l = {l}, with id: {id(l)}.')
def append_to_list(my_list):
    # Demo: .append() mutates the SAME list object (id stays constant), so
    # the caller observes the change — contrast with the `+ [126]` version.
    print('\t--- Inside the function now ---')
    print(f'\tFunction got value: {my_list}, with id: {id(my_list)}.')
    my_list.append(126)
    print(f'\tChanged my_string to be {my_list}, with id: {id(my_list)}.')
    print('\t--- Finished to run the function now ---')
l = [2, 8, 20, 28, 50, 82]
print(f'Before calling the function: l = {l}, with id: {id(l)}.')
append_to_list(l)
print(f'After calling the function: l = {l}, with id: {id(l)}.')
def append_to_list(my_list):
    # Mutates the caller's list in place; no return value is needed.
    my_list.extend((126,))
l = [2, 8, 20, 28, 50, 82]
append_to_list(l)
print(l)
def append_to_list(my_list):
    # Work on a shallow copy so the caller's list stays untouched
    # (reassigning my_list = my_list.copy() would work just as well).
    updated = list(my_list)
    updated.append(126)
    return updated
l = [2, 8, 20, 28, 50, 82]
new_l = append_to_list(l) # l גם יעבוד, אבל יאבד את הערך של l = append_to_list(l)
print(l)
print(new_l)
animals = ('dog', 'fish', 'horse')
first_animal = animals[0]
print(f"The first animal is {first_animal}")
animals[1] = 'pig'
my_tuple = tuple()
my_tuple = (4, )
my_home = (35.027185, -111.022388) # x, y
traingle_sides_length = (4, 5, 6)
possible_directions = ('UP', 'DOWN', 'LEFT', 'RIGHT')
students_and_age = [('Itamar', 50), ('Yam', '27'), ('David', 16)] # רשימה של טאפלים
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <p style="text-align
Step2: <p style="text-align
Step3: <p style="text-align
Step4: <p style="text-align
Step5: <p style="text-align
Step6: <p style="text-align
Step7: <p style="text-align
Step8: <p style="text-align
Step9: <p style="text-align
Step10: <p style="text-align
Step11: <span style="text-align
Step12: <p style="text-align
Step13: <p style="text-align
Step14: <p style="text-align
Step15: <p style="text-align
Step16: <p style="text-align
Step17: <p style="text-align
Step18: <p style="text-align
Step19: <p style="text-align
Step20: <p style="text-align
Step21: <div class="align-center" style="display
|
7,105
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
raw.crop(tmax=60).load_data()
events = mne.find_events(raw, stim_channel='STI 014')
sample_data_events_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw-eve.fif')
events_from_file = mne.read_events(sample_data_events_file)
assert np.array_equal(events, events_from_file[:len(events)])
mne.find_events(raw, stim_channel='STI 014')
events_no_button = mne.pick_events(events, exclude=32)
merged_events = mne.merge_events(events, [1, 2, 3], 1)
print(np.unique(merged_events[:, -1]))
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4, 'smiley': 5, 'buttonpress': 32}
fig = mne.viz.plot_events(events, sfreq=raw.info['sfreq'],
first_samp=raw.first_samp, event_id=event_dict)
fig.subplots_adjust(right=0.7) # make room for legend
raw.plot(events=events, start=5, duration=10, color='gray',
event_color={1: 'r', 2: 'g', 3: 'b', 4: 'm', 5: 'y', 32: 'k'})
new_events = mne.make_fixed_length_events(raw, start=5, stop=50, duration=2.)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The tutorial tut-events-vs-annotations describes in detail the
Step2: Reading and writing events from/to a file
Step3: When writing event arrays to disk, the format will be inferred from the file
Step4: .. sidebar
Step5: It is also possible to combine two Event IDs using
Step6: Note, however, that merging events is not necessary if you simply want to
Step7: Event dictionaries like this one are used when extracting epochs from
Step8: Plotting events and raw data together
Step9: Making equally-spaced Events arrays
|
7,106
|
<ASSISTANT_TASK:>
Python Code:
from nbloader import Notebook
loaded_notebook = Notebook('test.ipynb')
loaded_notebook.run_all()
loaded_notebook.ns['a']
loaded_notebook.ns['b']
loaded_notebook.run_tag('add_one')
print(loaded_notebook.ns['a'])
loaded_notebook.run_tag('add_one')
print(loaded_notebook.ns['a'])
loaded_notebook.ns['a'] = 0
loaded_notebook.run_tag('add_one')
print(loaded_notebook.ns['a'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The above commad loades a notebook as an object. This can be done inside a jupyter notebook or a regular python script.
Step2: After loaded_notebook.run_all() is called
Step3: The notebooks namesace is just a dict so if you try to get something thats not there will get an error.
Step4: Run individual cells if they are tagged.
Step5: If a cell have a comment on its first line it will become a tag.
|
7,107
|
<ASSISTANT_TASK:>
Python Code:
import mysql.connector
import pandas as pd
df= pd.read_csv('C:/Users/Alex/Documents/eafit/semestres/X semestre/programacion/taller2.tsv', sep = '\t')
df[:1]
-- Schema for the GWAS catalog exercise.
-- Fixes over the original:
--  * enfermedad/plataforma ids are AUTO_INCREMENT: the loader functions
--    insert NULL for the id and then read LAST_INSERT_ID(), which only
--    works with auto-increment keys.
--  * publicacion now has a PRIMARY KEY on id_publicacion (other tables
--    reference it).
--  * publicacion and estudio reference each other; the original created
--    publicacion first with an inline FK to the not-yet-existing estudio,
--    which fails.  estudio is now created first and its FK to publicacion
--    is added with ALTER TABLE at the end.
--  * p-values / effect sizes are fractional, so int would truncate them.
CREATE TABLE enfermedad
(
    id_enfermedad int PRIMARY KEY AUTO_INCREMENT,
    nombre varchar(255)
);

CREATE TABLE plataforma
(
    id_plataforma int PRIMARY KEY AUTO_INCREMENT,
    nombre varchar(255)
);

CREATE TABLE loci
(
    id_loci int NOT NULL PRIMARY KEY,
    region varchar(255),
    chrom varchar(255),
    pos int,
    genes_reportados int,
    gen_mapped varchar(255),
    gen_upstream int,
    gen_downstream int,
    SNP_GENE_IDS int,
    UPSTREAM_GENE_DISTANCE int,
    DOWNSTREAM_GENE_DISTANCE int,
    STRONGEST_SP_RISK varchar(255),
    SNPS varchar(255),
    MERGED int,
    SNP_ID_CURRENT varchar(255),
    CONTEXTO varchar(255),
    risk_allele varchar(255),
    PVAl double,
    PVALUE_MLOG double,
    PVALUE_txt varchar(255),
    BETA double,
    novCI varchar(255),
    id_plataforma int,
    FOREIGN KEY (id_plataforma) REFERENCES plataforma(id_plataforma)
);

CREATE TABLE enfermedad_loci
(
    id_enfermedad int,
    id_loci int,
    PRIMARY KEY (id_enfermedad, id_loci),
    FOREIGN KEY (id_enfermedad) REFERENCES enfermedad(id_enfermedad),
    FOREIGN KEY (id_loci) REFERENCES loci(id_loci)
);

CREATE TABLE journal
(
    id_journal int PRIMARY KEY,
    nombre varchar(255)
);

CREATE TABLE estudio
(
    nombre varchar(255),
    id_estudio int PRIMARY KEY,
    id_enfermedad int,
    id_publicacion int,
    FOREIGN KEY (id_enfermedad) REFERENCES enfermedad(id_enfermedad),
    tamano_muestra int,
    replicas int
);

CREATE TABLE publicacion
(
    id_publicacion int PRIMARY KEY,
    id_pubmed int,
    autor varchar(255),
    fecha_pub varchar(20),
    link varchar(255),
    id_journal int,
    id_estudio int,
    FOREIGN KEY (id_journal) REFERENCES journal(id_journal),
    FOREIGN KEY (id_estudio) REFERENCES estudio(id_estudio)
);

-- Close the circular reference now that both tables exist.
ALTER TABLE estudio
    ADD FOREIGN KEY (id_publicacion) REFERENCES publicacion(id_publicacion);
df.head(1)
hostname = '127.0.0.1'
username = 'alexacl95'
password = 'SUSAna05'
database = 'programacion'
def doQuery( conn ) :
    # Print every (id, name) row of the `enfermedad` table.
    # NOTE(review): the tuple unpacking assumes the table has exactly two
    # columns; a schema change adding a column would raise ValueError here.
    cur = conn.cursor()
    cur.execute( "select * from enfermedad" )
    for id_nombre, nombre_enf in cur.fetchall() :
        print (id_nombre, nombre_enf)
myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database )
doQuery( myConnection )
myConnection.close()
def get_diseaseId(disease_name):
    """Return the id of *disease_name* in `enfermedad`, inserting it if new.

    Uses the module-level ``myConnection``.  Queries are parameterized:
    the original interpolated the name into the SQL text with ``%``,
    which breaks on names containing quotes and allows SQL injection.
    """
    cur = myConnection.cursor()
    cur.execute('SELECT id_enfermedad FROM enfermedad WHERE nombre = %s',
                (disease_name,))
    row = cur.fetchone()
    if row is not None:
        return row[0]
    # Not present yet: insert it (NULL id -> AUTO_INCREMENT) and fetch the
    # generated key.
    cur.execute('INSERT INTO enfermedad VALUES (NULL, %s)', (disease_name,))
    cur.execute('SELECT LAST_INSERT_ID()')
    id_enf = cur.fetchall()[0][0]
    myConnection.commit()
    return id_enf
def get_platId(plat_name):
    """Return the id of *plat_name* in `plataforma`, inserting it if new.

    Uses the module-level ``myConnection``.  Queries are parameterized
    instead of string-formatted (quoting bugs / SQL injection), and the
    debug print of the raw INSERT text has been removed.
    """
    cur = myConnection.cursor()
    cur.execute('SELECT id_plataforma FROM plataforma WHERE nombre = %s',
                (plat_name,))
    row = cur.fetchone()
    if row is not None:
        return row[0]
    # Not present yet: insert it (NULL id -> AUTO_INCREMENT) and fetch the
    # generated key.
    cur.execute('INSERT INTO plataforma VALUES (NULL, %s)', (plat_name,))
    cur.execute('SELECT LAST_INSERT_ID()')
    id_plat = cur.fetchall()[0][0]
    myConnection.commit()
    return id_plat
for index, row in df.iterrows():
plat_name = row['PLATFORM [SNPS PASSING QC]']
plat_id = get_platId(plat_name)
def get_lociId(loci_name):
    """Return the id of *loci_name* in `loci`, inserting it if absent.

    BUGFIX: the original body was copy-pasted from get_diseaseId — it
    referenced the undefined name ``disease_name`` (NameError on first
    call) and read/wrote the ``enfermedad`` table.  It now uses the
    ``loci_name`` parameter against ``loci``, with parameterized queries.
    TODO(review): the ``loci`` DDL has no ``nombre`` column — confirm
    which column this lookup should actually match (``region``? ``SNPS``?).
    """
    cur = myConnection.cursor()
    cur.execute('SELECT id_loci FROM loci WHERE nombre = %s', (loci_name,))
    row = cur.fetchone()
    if row is not None:
        return row[0]
    cur.execute('INSERT INTO loci (nombre) VALUES (%s)', (loci_name,))
    cur.execute('SELECT LAST_INSERT_ID()')
    id_loci = cur.fetchall()[0][0]
    myConnection.commit()
    return id_loci
hostname = '127.0.0.1'
username = 'alexacl95'
password = 'SUSAna05'
database = 'programacion'
myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database )
for index, row in df.iterrows():
dis_name = row['DISEASE/TRAIT']
dissease_id = get_diseaseId(dis_name)
print()
myConnection.close()
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_query.html
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: La idea de este taller es manipular archivos (leerlos, parsearlos y escribirlos) y hacer lo mismo con bases de datos estructuradas.
Step2: Qué Entidades (tablas) puede definir?
Step11: Ejercicio 2
Step12: Ejercicio 3
|
7,108
|
<ASSISTANT_TASK:>
Python Code:
import mariadb
import json
with open('../credentials.json', 'r') as crd_json_fd:
json_text = crd_json_fd.read()
json_obj = json.loads(json_text)
credentials = json_obj["Credentials"]
username = credentials["username"]
password = credentials["password"]
table_name = "publications"
db_name = "ubbcluj"
mariadb_connection = mariadb.connect(user=username, password=password, database=db_name)
mariadb_cursor = mariadb_connection.cursor()
queryString = "SELECT COUNT(*) FROM "
queryString += table_name
try:
mariadb_cursor.execute(queryString)
except Exception as ex:
print(ex)
for item in mariadb_cursor:
count = item[0]
print("Number of publications: {0}".format(count))
queryString = "SELECT Affiliations, COUNT(*) AS c FROM publications GROUP BY Affiliations ORDER BY c DESC"
try:
mariadb_cursor.execute(queryString)
except Exception as ex:
print(ex)
affiliations = []
for item in mariadb_cursor:
Affiliation = item[0]
affiliations.append(item[0])
c = item[1]
print("{0} : {1} occurences".format(Affiliation, c))
for i in affiliations:
if "conference" in [k.lower() for k in i.split()]:
print(i)
for i in affiliations:
if "journal" in [k.lower() for k in i.split()]:
print(i)
tokens = []
for i in affiliations:
words = i.split()
for word in words:
tokens.append(word)
tokens
from nltk.corpus import stopwords
import nltk
nltk.download('stopwords')
sr= stopwords.words('english')
clean_tokens = tokens[:]
for token in tokens:
if token in stopwords.words('english'):
clean_tokens.remove(token)
freq = nltk.FreqDist(clean_tokens)
for key,val in freq.items():
#print(str(key) + ':' + str(val))
pass
freq.plot(20, cumulative=False)
# Histogram of professors publication number
queryString = "SELECT (Select FullName from humanoid_entities where id = ProfessorId), ProfessorId, COUNT(Title) FROM publications GROUP BY ProfessorId ORDER BY COUNT(Title) desc"
try:
mariadb_cursor.execute(queryString)
except Exception as ex:
print(ex)
name_dict = {}
tup_list = []
max_id = 0
for item in mariadb_cursor:
ProfName = item[0]
ProfId = item[1]
max_id = max(max_id, ProfId)
Count = item[2]
tup_list.append((ProfName, ProfId, Count))
name_dict[ProfId] = ProfName
import pandas as pd
print(tup_list)
final_list = []
for i in range(max_id):
found_id = False
found_value = 0
for k in tup_list:
if i == k[1]:
found_id = True
found_value = k[2]
break
if not found_id:
name_dict[i] = "NONE"
final_list.append((i, found_value))
fa = pd.DataFrame([k[1] for k in final_list])
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.bar([name_dict[k[0]] for k in final_list], [k[1] for k in final_list])
loc = plticker.MultipleLocator(base=1.0)
ax.xaxis.set_major_locator(loc)
plt.xticks(rotation=90)
plt.show()
queryString = "SELECT Title FROM publications"
try:
mariadb_cursor.execute(queryString)
except Exception as ex:
print(ex)
titles = []
for item in mariadb_cursor:
Title = item[0]
titles.append(item[0])
from sklearn.feature_extraction.text import CountVectorizer
corpus = titles[:]
corpus
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(corpus)
print(vectorizer.get_feature_names())
print(X.shape)
print(X.toarray())
for k in X.toarray():
for j in k:
if j > 1:
print(j)
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer(use_idf=False, norm='l1')
matrix = vec.fit_transform(corpus)
pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
from textblob import TextBlob
import nltk
nltk.download('punkt')
def textblob_tokenizer(str_input):
    # Tokenize with TextBlob after lower-casing, then reduce every token to
    # its stem so inflected forms ("studies", "studying") collapse into one
    # vocabulary entry for the vectorizers below.
    blob = TextBlob(str_input.lower())
    tokens = blob.words
    words = [token.stem() for token in tokens]
    return words
vec = CountVectorizer(tokenizer=textblob_tokenizer)
matrix = vec.fit_transform(corpus)
pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
vec = TfidfVectorizer(tokenizer=textblob_tokenizer,
stop_words='english',
use_idf=True)
matrix = vec.fit_transform(corpus)
df = pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
for word in df.columns:
for row in df[word]:
if row != 0.0:
print(word, row)
from sklearn.cluster import KMeans
number_of_clusters = 10
km = KMeans(n_clusters=number_of_clusters)
km.fit(matrix)
km.fit
print("Top terms per cluster:")
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vec.get_feature_names()
for i in range(number_of_clusters):
top_ten_words = [terms[ind] for ind in order_centroids[i, :5]]
print("Cluster {}: {}".format(i, ' '.join(top_ten_words)))
results = pd.DataFrame({
'corpus': corpus,
'category': km.labels_
})
results.sort_values('category')
for k in results.sort_values('category').values:
print(k[1], " --- ", k[0])
### GENSIM
from gensim.models import word2vec
from gensim.test.utils import common_texts, get_tmpfile
tokenized_sentences = [[j.lower() for j in st.split() if j not in stopwords.words('english')] for st in corpus]
model = word2vec.Word2Vec(tokenized_sentences, min_count=1)
model.save("word2vec.model")
#model = word2vec.load("word2vec.model")
model
model.wv["study"]
words = list(model.wv.vocab)
print(words)
X = model[model.wv.vocab]
df = pd.DataFrame(df)
df.shape
df.head()
import numpy as np
import io
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word in model.wv.vocab:
out_m.write(word + "\n")
out_v.write('\t'.join([str(x) for x in model[word]]) + "\n")
out_v.close()
out_m.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Counting publications.
Step2: 3. Distinct Affiliations
Step3: 3. TF-IDF and K-Means?
|
7,109
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sqlite3
%matplotlib inline
# Connect to the MIMIC database
conn = sqlite3.connect('data/mimicdata.sqlite')
# Create our test query
test_query =
SELECT subject_id, hadm_id, admittime, dischtime, admission_type, diagnosis
FROM admissions
LIMIT 10;
# Run the query and assign the results to a variable
test = pd.read_sql_query(test_query,conn)
# Display the first few rows
test.head()
query =
SELECT de.icustay_id
, (strftime('%s',de.charttime)-strftime('%s',ie.intime))/60.0/60.0 as HOURS
, di.label
, de.value
, de.valuenum
, de.uom
FROM chartevents de
INNER join d_items di
ON de.itemid = di.itemid
INNER join icustays ie
ON de.icustay_id = ie.icustay_id
WHERE de.subject_id = 40084
ORDER BY charttime;
ce = pd.read_sql_query(query,conn)
# OPTION 2: load chartevents from a CSV file
# ce = pd.read_csv('data/example_chartevents.csv', index_col='HOURSSINCEADMISSION')
# Preview the data
# Use 'head' to limit the number of rows returned
ce.head()
# Select a single column
ce['LABEL'].head()
# Select just the heart rate rows using an index
ce[ce.LABEL=='Heart Rate'].head()
# Which time stamps have a corresponding heart rate measurement?
print ce.index[ce.LABEL=='Heart Rate']
# Set x equal to the times
x_hr = ce.HOURS[ce.LABEL=='Heart Rate']
# Set y equal to the heart rates
y_hr = ce.VALUENUM[ce.LABEL=='Heart Rate']
# Plot time against heart rate
plt.figure(figsize=(14, 6))
plt.plot(x_hr,y_hr)
plt.xlabel('Time',fontsize=16)
plt.ylabel('Heart rate',fontsize=16)
plt.title('Heart rate over time from admission to the intensive care unit')
ce['LABEL'].unique()
# Exercise 1 here
# Set x equal to the times
x_hr = ce.HOURS[ce.LABEL=='Heart Rate']
# Set y equal to the heart rates
y_hr = ce.VALUENUM[ce.LABEL=='Heart Rate']
# Plot time against heart rate
plt.figure(figsize=(14, 6))
plt.plot(x_hr,y_hr)
# Get some information regarding arctic sun
plt.plot(ce.HOURS[ce.LABEL=='Arctic Sun/Alsius Set Temp'],
ce.VALUENUM[ce.LABEL=='Arctic Sun/Alsius Set Temp'],
'k+--',markersize=8)
plt.plot(ce.HOURS[ce.LABEL=='Arctic Sun Water Temp'],
ce.VALUENUM[ce.LABEL=='Arctic Sun Water Temp'],
'r+--',markersize=8)
plt.plot(ce.HOURS[ce.LABEL=='Arctic Sun/Alsius Temp #1 C'],
ce.VALUENUM[ce.LABEL=='Arctic Sun/Alsius Temp #1 C'],
'b+--',markersize=8)
plt.plot(ce.HOURS[ce.LABEL=='Arctic Sun/Alsius Temp #2 C'],
ce.VALUENUM[ce.LABEL=='Arctic Sun/Alsius Temp #2 C'],
'g+--',markersize=8)
plt.xlabel('Time',fontsize=16)
plt.ylabel('Heart rate',fontsize=16)
plt.xlabel('Time (hours)',fontsize=16)
plt.ylabel('Heart rate / temperature',fontsize=16)
plt.title('Heart rate over time')
plt.ylim(0,80)
plt.xlim(0,48)
plt.legend()
plt.figure(figsize=(14, 6))
plt.plot(ce.HOURS[ce.LABEL=='Respiratory Rate'],
ce.VALUENUM[ce.LABEL=='Respiratory Rate'],
'k+', markersize=10, linewidth=4)
plt.plot(ce.HOURS[ce.LABEL=='Resp Alarm - High'],
ce.VALUENUM[ce.LABEL=='Resp Alarm - High'],
'm--')
plt.plot(ce.HOURS[ce.LABEL=='Resp Alarm - Low'],
ce.VALUENUM[ce.LABEL=='Resp Alarm - Low'],
'm--')
plt.xlabel('Time',fontsize=16)
plt.ylabel('Respiratory rate',fontsize=16)
plt.title('Respiratory rate over time from admission, with upper and lower alarm thresholds')
plt.ylim(0,55)
# Display the first few rows of the GCS eye response data
ce[ce.LABEL=='GCS - Eye Opening'].head()
# Prepare the size of the figure
plt.figure(figsize=(18, 10))
# Set x equal to the times
x_hr = ce.HOURS[ce.LABEL=='Heart Rate']
# Set y equal to the heart rates
y_hr = ce.VALUENUM[ce.LABEL=='Heart Rate']
plt.plot(x_hr,y_hr)
plt.plot(ce.HOURS[ce.LABEL=='Respiratory Rate'],
ce.VALUENUM[ce.LABEL=='Respiratory Rate'],
'k', markersize=6)
# Add a text label to the y-axis
plt.text(-5,155,'GCS - Eye Opening',fontsize=14)
plt.text(-5,150,'GCS - Motor Response',fontsize=14)
plt.text(-5,145,'GCS - Verbal Response',fontsize=14)
# Iterate over list of GCS labels, plotting around 1 in 10 to avoid overlap
for i, txt in enumerate(ce.VALUE[ce.LABEL=='GCS - Eye Opening'].values):
if np.mod(i,6)==0 and i < 65:
plt.annotate(txt, (ce.HOURS[ce.LABEL=='GCS - Eye Opening'].values[i],155),fontsize=14)
for i, txt in enumerate(ce.VALUE[ce.LABEL=='GCS - Motor Response'].values):
if np.mod(i,6)==0 and i < 65:
plt.annotate(txt, (ce.HOURS[ce.LABEL=='GCS - Motor Response'].values[i],150),fontsize=14)
for i, txt in enumerate(ce.VALUE[ce.LABEL=='GCS - Verbal Response'].values):
if np.mod(i,6)==0 and i < 65:
plt.annotate(txt, (ce.HOURS[ce.LABEL=='GCS - Verbal Response'].values[i],145),fontsize=14)
plt.title('Vital signs and Glasgow Coma Scale over time from admission',fontsize=16)
plt.xlabel('Time (hours)',fontsize=16)
plt.ylabel('Heart rate or GCS',fontsize=16)
plt.ylim(10,165)
# OPTION 1: load outputs from the patient
query =
select de.icustay_id
, (strftime('%s',de.charttime)-strftime('%s',ie.intime))/60.0/60.0 as HOURS
, di.label
, de.value
, de.valueuom
from outputevents de
inner join icustays ie
on de.icustay_id = ie.icustay_id
inner join d_items di
on de.itemid = di.itemid
where de.subject_id = 40084
order by charttime;
oe = pd.read_sql_query(query,conn)
oe.head()
plt.figure(figsize=(14, 10))
plt.figure(figsize=(14, 6))
plt.title('Fluid output over time')
plt.plot(oe.HOURS,
oe.VALUE.cumsum()/1000,
'ro', markersize=8, label='Output volume, L')
plt.xlim(0,72)
plt.ylim(0,10)
plt.legend()
# OPTION 1: load inputs given to the patient (usually intravenously) using the database connection
query =
select de.icustay_id
, (strftime('%s',de.starttime)-strftime('%s',ie.intime))/60.0/60.0 as HOURS_START
, (strftime('%s',de.endtime)-strftime('%s',ie.intime))/60.0/60.0 as HOURS_END
, de.linkorderid
, di.label
, de.amount
, de.amountuom
, de.rate
, de.rateuom
from inputevents_mv de
inner join icustays ie
on de.icustay_id = ie.icustay_id
inner join d_items di
on de.itemid = di.itemid
where de.subject_id = 40084
order by endtime;
ie = pd.read_sql_query(query,conn)
# # OPTION 2: load ioevents using the CSV file with endtime as the index
# ioe = pd.read_csv('inputevents.csv'
# ,header=None
# ,names=['subject_id','itemid','label','starttime','endtime','amount','amountuom','rate','rateuom']
# ,parse_dates=True)
ie.head()
ie['LABEL'].unique()
plt.figure(figsize=(14, 10))
# Plot the cumulative input against the cumulative output
plt.plot(ie.HOURS_END[ie.AMOUNTUOM=='mL'],
ie.AMOUNT[ie.AMOUNTUOM=='mL'].cumsum()/1000,
'go', markersize=8, label='Intake volume, L')
plt.plot(oe.HOURS,
oe.VALUE.cumsum()/1000,
'ro', markersize=8, label='Output volume, L')
plt.title('Fluid balance over time',fontsize=16)
plt.xlabel('Hours',fontsize=16)
plt.ylabel('Volume (litres)',fontsize=16)
# plt.ylim(0,38)
plt.legend()
plt.figure(figsize=(14, 10))
# Plot the cumulative input against the cumulative output
plt.plot(ie.HOURS_END[ie.AMOUNTUOM=='mL'],
ie.AMOUNT[ie.AMOUNTUOM=='mL'].cumsum()/1000,
'go', markersize=8, label='Intake volume, L')
plt.plot(oe.HOURS,
oe.VALUE.cumsum()/1000,
'ro', markersize=8, label='Output volume, L')
# example on getting two columns from a dataframe: ie[['HOURS_START','HOURS_END']].head()
for i, idx in enumerate(ie.index[ie.LABEL=='Furosemide (Lasix)']):
plt.plot([ie.HOURS_START[ie.LABEL=='Furosemide (Lasix)'][idx],
ie.HOURS_END[ie.LABEL=='Furosemide (Lasix)'][idx]],
[ie.RATE[ie.LABEL=='Furosemide (Lasix)'][idx],
ie.RATE[ie.LABEL=='Furosemide (Lasix)'][idx]],
'b-',linewidth=4)
plt.title('Fluid balance over time',fontsize=16)
plt.xlabel('Hours',fontsize=16)
plt.ylabel('Volume (litres)',fontsize=16)
# plt.ylim(0,38)
plt.legend()
ie['LABEL'].unique()
# Exercise 2 here
plt.figure(figsize=(14, 10))
plt.plot(ce.index[ce.LABEL=='Heart Rate'],
ce.VALUENUM[ce.LABEL=='Heart Rate'],
'rx', markersize=8, label='HR')
plt.plot(ce.index[ce.LABEL=='O2 saturation pulseoxymetry'],
ce.VALUENUM[ce.LABEL=='O2 saturation pulseoxymetry'],
'g.', markersize=8, label='O2')
plt.plot(ce.index[ce.LABEL=='Arterial Blood Pressure mean'],
ce.VALUENUM[ce.LABEL=='Arterial Blood Pressure mean'],
'bv', markersize=8, label='MAP')
plt.plot(ce.index[ce.LABEL=='Respiratory Rate'],
ce.VALUENUM[ce.LABEL=='Respiratory Rate'],
'k+', markersize=8, label='RR')
plt.title('Vital signs over time from admission')
plt.ylim(0,130)
plt.legend()
# OPTION 1: load labevents data using the database connection
query =
SELECT de.subject_id
, de.charttime
, di.label, de.value, de.valuenum
, de.uom
FROM labevents de
INNER JOIN d_labitems di
ON de.itemid = di.itemid
where de.subject_id = 40084
le = pd.read_sql_query(query,conn)
# OPTION 2: load labevents from the CSV file
# le = pd.read_csv('data/example_labevents.csv', index_col='HOURSSINCEADMISSION')
# preview the labevents data
le.head()
# preview the ioevents data
le[le.LABEL=='HEMOGLOBIN']
plt.figure(figsize=(14, 10))
plt.plot(le.index[le.LABEL=='HEMATOCRIT'],
le.VALUENUM[le.LABEL=='HEMATOCRIT'],
'go', markersize=6, label='Haematocrit')
plt.plot(le.index[le.LABEL=='HEMOGLOBIN'],
le.VALUENUM[le.LABEL=='HEMOGLOBIN'],
'bv', markersize=8, label='Hemoglobin')
plt.title('Laboratory measurements over time from admission')
plt.ylim(0,38)
plt.legend()
# load ioevents
ioe = pd.read_csv('data/example_ioevents.csv',index_col='HOURSSINCEADMISSION_START')
ioe.head()
plt.figure(figsize=(14, 10))
plt.plot(ie.CHARTTIME[ie.LABEL=='Midazolam (Versed)'],
ie.RATE[ie.LABEL=='Midazolam (Versed)'],
'go', markersize=6, label='Midazolam (Versed)')
plt.plot(ie.CHARTTIME[ie.LABEL=='Propofol'],
ie.RATE[ie.LABEL=='Propofol'],
'bv', markersize=8, label='Propofol')
plt.plot(ie.CHARTTIME[ie.LABEL=='Fentanyl'],
ie.RATE[ie.LABEL=='Fentanyl'],
'k+', markersize=8, label='Fentanyl')
plt.title('Inputs over time from admission')
plt.ylim(0,380)
plt.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Connect to the database
Step4: Load the chartevents data
Step5: Review the patient's heart rate
Step6: In a similar way, we can select rows from data using indexes.
Step7: Plot 1
Step8: Task 1
Step9: What is happening to this patient's heart rate?
Step10: Plot 2
Step11: Task 2
Step13: Task 3
Step15: To provide necessary context to this plot, it would help to include patient input data. This provides the necessary context to determine a patient's fluid balance - a key indicator in patient health.
Step16: Note that the column headers are different
Step17: As the plot shows, the patient's intake tends to be above their output (as one would expect!) - but there are periods where they are almost one to one. One of the biggest challenges of working with ICU data is that context is everything - let's look at a treatment (lasix) that we know will affect this graph.
Step18: Exercise 2
Step19: Plot 3
Step21: Plot 5
Step22: Plot 5
|
7,110
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
figsize(8, 6)
import sys
sys.path.insert(0, "../")
import pandas
import numpy
from folding_group import FoldingGroupClassifier
from rep.data import LabeledDataStorage
from rep.report import ClassificationReport
from rep.report.metrics import RocAuc
from sklearn.metrics import roc_curve, roc_auc_score
from decisiontrain import DecisionTrainClassifier
from rep.estimators import SklearnClassifier
import root_numpy
MC = pandas.DataFrame(root_numpy.root2array('../datasets/MC/csv/WG/Bu_JPsiK/2012/Tracks.root', stop=5000000))
data = pandas.DataFrame(root_numpy.root2array('../datasets/data/csv/WG/Bu_JPsiK/2012/Tracks.root', stop=5000000))
data.head()
MC.head()
from utils import data_tracks_preprocessing
data = data_tracks_preprocessing(data, N_sig_sw=True)
MC = data_tracks_preprocessing(MC)
', '.join(data.columns)
print sum(data.signB == 1), sum(data.signB == -1)
print sum(MC.signB == 1), sum(MC.signB == -1)
mask_sw_positive = (data.N_sig_sw.values > 1) * 1
data.head()
data['group_column'] = numpy.unique(data.event_id, return_inverse=True)[1]
MC['group_column'] = numpy.unique(MC.event_id, return_inverse=True)[1]
data.index = numpy.arange(len(data))
MC.index = numpy.arange(len(MC))
# features = ['cos_diff_phi', 'diff_pt', 'partPt', 'partP', 'nnkrec', 'diff_eta', 'EOverP',
# 'ptB', 'sum_PID_mu_k', 'proj', 'PIDNNe', 'sum_PID_k_e', 'PIDNNk', 'sum_PID_mu_e', 'PIDNNm',
# 'phi', 'IP', 'IPerr', 'IPs', 'veloch', 'max_PID_k_e', 'ghostProb',
# 'IPPU', 'eta', 'max_PID_mu_e', 'max_PID_mu_k', 'partlcs']
features = ['cos_diff_phi', 'partPt', 'partP', 'nnkrec', 'diff_eta', 'EOverP',
'ptB', 'sum_PID_mu_k', 'proj', 'PIDNNe', 'sum_PID_k_e', 'PIDNNk', 'sum_PID_mu_e', 'PIDNNm',
'phi', 'IP', 'IPerr', 'IPs', 'veloch', 'max_PID_k_e', 'ghostProb',
'IPPU', 'eta', 'max_PID_mu_e', 'max_PID_mu_k', 'partlcs']
b_ids_data = numpy.unique(data.group_column.values, return_index=True)[1]
b_ids_MC = numpy.unique(MC.group_column.values, return_index=True)[1]
Bdata = data.iloc[b_ids_data].copy()
BMC = MC.iloc[b_ids_MC].copy()
Bdata['Beta'] = Bdata.diff_eta + Bdata.eta
BMC['Beta'] = BMC.diff_eta + BMC.eta
Bdata['Bphi'] = Bdata.diff_phi + Bdata.phi
BMC['Bphi'] = BMC.diff_phi + BMC.phi
Bfeatures = ['Beta', 'Bphi', 'ptB']
hist(Bdata['ptB'].values, normed=True, alpha=0.5, bins=60,
weights=Bdata['N_sig_sw'].values)
hist(BMC['ptB'].values, normed=True, alpha=0.5, bins=60);
hist(Bdata['Beta'].values, normed=True, alpha=0.5, bins=60,
weights=Bdata['N_sig_sw'].values)
hist(BMC['Beta'].values, normed=True, alpha=0.5, bins=60);
hist(Bdata['Bphi'].values, normed=True, alpha=0.5, bins=60,
weights=Bdata['N_sig_sw'].values)
hist(BMC['Bphi'].values, normed=True, alpha=0.5, bins=60);
tt_base = DecisionTrainClassifier(learning_rate=0.02, n_estimators=1000,
n_threads=16)
data_vs_MC_B = pandas.concat([Bdata, BMC])
label_data_vs_MC_B = [0] * len(Bdata) + [1] * len(BMC)
weights_data_vs_MC_B = numpy.concatenate([Bdata.N_sig_sw.values * (Bdata.N_sig_sw.values > 1) * 1,
numpy.ones(len(BMC))])
weights_data_vs_MC_B_all = numpy.concatenate([Bdata.N_sig_sw.values, numpy.ones(len(BMC))])
tt_B = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=321,
train_features=Bfeatures, group_feature='group_column')
%time tt_B.fit(data_vs_MC_B, label_data_vs_MC_B, sample_weight=weights_data_vs_MC_B)
pass
roc_auc_score(label_data_vs_MC_B, tt_B.predict_proba(data_vs_MC_B)[:, 1], sample_weight=weights_data_vs_MC_B)
roc_auc_score(label_data_vs_MC_B, tt_B.predict_proba(data_vs_MC_B)[:, 1], sample_weight=weights_data_vs_MC_B_all)
from hep_ml.reweight import GBReweighter, FoldingReweighter
reweighterB = FoldingReweighter(GBReweighter(), random_state=3444)
reweighterB.fit(BMC[Bfeatures], Bdata[Bfeatures], target_weight=Bdata.N_sig_sw)
BMC_weights = reweighterB.predict_weights(BMC[Bfeatures])
hist(Bdata['ptB'].values, normed=True, alpha=0.5, bins=60,
weights=Bdata['N_sig_sw'].values)
hist(BMC['ptB'].values, normed=True, alpha=0.5, bins=60, weights=BMC_weights);
weights_data_vs_MC_B_w = numpy.concatenate([Bdata.N_sig_sw.values * (Bdata.N_sig_sw.values > 1) * 1,
BMC_weights])
weights_data_vs_MC_B_all_w = numpy.concatenate([Bdata.N_sig_sw.values, BMC_weights])
tt_B = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=321,
train_features=Bfeatures, group_feature='group_column')
%time tt_B.fit(data_vs_MC_B, label_data_vs_MC_B, sample_weight=weights_data_vs_MC_B_w)
roc_auc_score(label_data_vs_MC_B, tt_B.predict_proba(data_vs_MC_B)[:, 1], sample_weight=weights_data_vs_MC_B_all_w)
MC['N_sig_sw'] = BMC_weights[numpy.unique(MC.group_column.values, return_inverse=True)[1]]
def compute_target_number_of_tracks(X):
    """Return, for every track (row) of X, the number of tracks in its event.

    Parameters
    ----------
    X : DataFrame with a ``group_column`` column identifying the event
        each track belongs to.

    Returns
    -------
    numpy array aligned with the rows of X, holding the per-event track
    multiplicity broadcast back onto each track.

    Note: the previous implementation indexed ``numpy.bincount`` output with
    the *inverse* indices from ``numpy.unique``, which is only correct when
    the group ids are contiguous integers starting at 0; ``return_counts``
    is correct for arbitrary group labels and identical in the contiguous case.
    """
    _, inverse, counts = numpy.unique(X.group_column, return_inverse=True,
                                      return_counts=True)
    # counts[i] is the multiplicity of the i-th unique group id; mapping it
    # through the inverse indices assigns each track its event's size.
    return counts[inverse]
from decisiontrain import DecisionTrainRegressor
from rep.estimators import SklearnRegressor
from rep.metaml import FoldingRegressor
tt_base_reg = DecisionTrainRegressor(learning_rate=0.02, n_estimators=1000,
n_threads=16)
%%time
tt_data_NT = FoldingRegressor(SklearnRegressor(tt_base_reg), n_folds=2, random_state=321,
features=features)
tt_data_NT.fit(data, compute_target_number_of_tracks(data), sample_weight=data.N_sig_sw.values * mask_sw_positive)
from sklearn.metrics import mean_squared_error
mean_squared_error(compute_target_number_of_tracks(data), tt_data_NT.predict(data),
sample_weight=data.N_sig_sw.values) ** 0.5
mean_squared_error(compute_target_number_of_tracks(data),
[numpy.mean(compute_target_number_of_tracks(data))] * len(data),
sample_weight=data.N_sig_sw.values) ** 0.5
%%time
tt_MC_NT = FoldingRegressor(SklearnRegressor(tt_base_reg), n_folds=2, random_state=321,
features=features)
tt_MC_NT.fit(MC, compute_target_number_of_tracks(MC), sample_weight=MC.N_sig_sw.values)
mean_squared_error(compute_target_number_of_tracks(MC),
tt_MC_NT.predict(MC), sample_weight=MC.N_sig_sw.values) ** 0.5
mean_squared_error(compute_target_number_of_tracks(MC),
[numpy.mean(compute_target_number_of_tracks(MC))] * len(MC),
sample_weight=MC.N_sig_sw.values) ** 0.5
tt_MC_NT.get_feature_importances().sort_values(by='effect')[-5:]
tt_base = DecisionTrainClassifier(learning_rate=0.02, n_estimators=1000,
n_threads=16)
B_signs = data['signB'].groupby(data['group_column']).aggregate(numpy.mean)
B_weights = data['N_sig_sw'].groupby(data['group_column']).aggregate(numpy.mean)
B_signs_MC = MC['signB'].groupby(MC['group_column']).aggregate(numpy.mean)
B_weights_MC = MC['N_sig_sw'].groupby(MC['group_column']).aggregate(numpy.mean)
from scipy.special import logit, expit
def compute_Bprobs(X, track_proba, weights=None, normed_weights=False):
    """Combine per-track tagging probabilities into per-event B probabilities.

    Parameters
    ----------
    X : DataFrame with a ``group_column`` (event id) and a ``signTrack``
        column (+1/-1 track charge sign).
    track_proba : array of per-track probabilities; non-finite entries are
        treated as the uninformative value 0.5 (logit(0.5) == 0, so they
        contribute nothing to the event sum).
    weights : optional per-track weights; defaults to 1 for every track.
    normed_weights : if True, normalize the weights to sum to 1 per event.

    Returns
    -------
    Array of per-event probabilities, one entry per unique event id,
    ordered by sorted event id (the order produced by ``numpy.unique``).

    Unlike the previous version, the caller's ``track_proba`` and
    ``weights`` arrays are left untouched (they used to be modified
    in place).
    """
    if weights is None:
        weights = numpy.ones(len(X))
    else:
        # Work on a copy: the in-place normalization below must not leak
        # back into the caller's array.
        weights = numpy.asarray(weights, dtype=float).copy()
    _, data_ids = numpy.unique(X['group_column'], return_inverse=True)
    # Replace NaN/inf without mutating the input; ~isfinite already covers
    # NaN, so the old separate isnan pass was redundant.
    track_proba = numpy.where(numpy.isfinite(track_proba), track_proba, 0.5)
    if normed_weights:
        weights_per_events = numpy.bincount(data_ids, weights=weights)
        weights /= weights_per_events[data_ids]
    # Sum signed log-odds per event, then map back to probability space.
    predictions = numpy.bincount(data_ids,
                                 weights=logit(track_proba) * X['signTrack'] * weights)
    return expit(predictions)
tt_data = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=321,
train_features=features, group_feature='group_column')
%time tt_data.fit(data, data.label, sample_weight=data.N_sig_sw.values * mask_sw_positive)
pass
pandas.DataFrame({'dataset': ['MC', 'data'],
'quality': [roc_auc_score(
B_signs_MC, compute_Bprobs(MC, tt_data.predict_proba(MC)[:, 1]), sample_weight=B_weights_MC),
roc_auc_score(
B_signs, compute_Bprobs(data, tt_data.predict_proba(data)[:, 1]), sample_weight=B_weights)]})
tt_MC = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=321,
train_features=features, group_feature='group_column')
%time tt_MC.fit(MC, MC.label)
pass
pandas.DataFrame({'dataset': ['MC', 'data'],
'quality': [roc_auc_score(
B_signs_MC, compute_Bprobs(MC, tt_MC.predict_proba(MC)[:, 1]), sample_weight=B_weights_MC),
roc_auc_score(
B_signs, compute_Bprobs(data, tt_MC.predict_proba(data)[:, 1]), sample_weight=B_weights)]})
combined_data_MC = pandas.concat([data, MC])
combined_label = numpy.array([0] * len(data) + [1] * len(MC))
combined_weights_data = data.N_sig_sw.values #/ numpy.bincount(data.group_column)[data.group_column.values]
combined_weights_data_passed = combined_weights_data * mask_sw_positive
combined_weights_MC = MC.N_sig_sw.values# / numpy.bincount(MC.group_column)[MC.group_column.values]
combined_weights = numpy.concatenate([combined_weights_data_passed,
1. * combined_weights_MC / sum(combined_weights_MC) * sum(combined_weights_data_passed)])
combined_weights_all = numpy.concatenate([combined_weights_data,
1. * combined_weights_MC / sum(combined_weights_MC) * sum(combined_weights_data)])
%%time
tt_base_large = DecisionTrainClassifier(learning_rate=0.3, n_estimators=1000,
n_threads=20)
tt_data_vs_MC = FoldingGroupClassifier(SklearnClassifier(tt_base_large), n_folds=2, random_state=321,
train_features=features + ['label'], group_feature='group_column')
tt_data_vs_MC.fit(combined_data_MC, combined_label, sample_weight=combined_weights)
a = []
for n, p in enumerate(tt_data_vs_MC.staged_predict_proba(combined_data_MC)):
a.append(roc_auc_score(combined_label, p[:, 1], sample_weight=combined_weights))
plot(a)
combined_p = tt_data_vs_MC.predict_proba(combined_data_MC)[:, 1]
roc_auc_score(combined_label, combined_p, sample_weight=combined_weights)
roc_auc_score(combined_label, combined_p, sample_weight=combined_weights_all)
from utils import calibrate_probs, plot_calibration
combined_p_calib = calibrate_probs(combined_label, combined_weights, combined_p)[0]
plot_calibration(combined_p, combined_label, weight=combined_weights)
plot_calibration(combined_p_calib, combined_label, weight=combined_weights)
# reweight data predicted as data to MC
used_probs = combined_p_calib
data_probs_to_be_MC = used_probs[combined_label == 0]
MC_probs_to_be_MC = used_probs[combined_label == 1]
track_weights_data = numpy.ones(len(data))
# take data with probability to be data
mask_data = data_probs_to_be_MC < 0.5
track_weights_data[mask_data] = (data_probs_to_be_MC[mask_data]) / (1 - data_probs_to_be_MC[mask_data])
# reweight MC predicted as MC to data
track_weights_MC = numpy.ones(len(MC))
mask_MC = MC_probs_to_be_MC > 0.5
track_weights_MC[mask_MC] = (1 - MC_probs_to_be_MC[mask_MC]) / (MC_probs_to_be_MC[mask_MC])
# simple approach, reweight only MC
track_weights_only_MC = (1 - MC_probs_to_be_MC) / MC_probs_to_be_MC
# data_ids = numpy.unique(data['group_column'], return_inverse=True)[1]
# MC_ids = numpy.unique(MC['group_column'], return_inverse=True)[1]
# # event_weight_data = (numpy.bincount(data_ids, weights=data.N_sig_sw) / numpy.bincount(data_ids))[data_ids]
# # event_weight_MC = (numpy.bincount(MC_ids, weights=MC.N_sig_sw) / numpy.bincount(MC_ids))[MC_ids]
# # normalize weights for tracks in a way that sum w_track = 1 per event
# track_weights_data /= numpy.bincount(data_ids, weights=track_weights_data)[data_ids]
# track_weights_MC /= numpy.bincount(MC_ids, weights=track_weights_MC)[MC_ids]
hist(combined_p_calib[combined_label == 1], label='MC', normed=True, alpha=0.4, bins=60,
weights=combined_weights_MC)
hist(combined_p_calib[combined_label == 0], label='data', normed=True, alpha=0.4, bins=60,
weights=combined_weights_data);
legend(loc='best')
hist(track_weights_MC, normed=True, alpha=0.4, bins=60, label='MC')
hist(track_weights_data, normed=True, alpha=0.4, bins=60, label='RD');
legend(loc='best')
numpy.mean(track_weights_data), numpy.mean(track_weights_MC)
hist(combined_p_calib[combined_label == 1], label='MC', normed=True, alpha=0.4, bins=60,
weights=track_weights_MC * MC.N_sig_sw.values)
hist(combined_p_calib[combined_label == 0], label='data', normed=True, alpha=0.4, bins=60,
weights=track_weights_data * data.N_sig_sw.values);
legend(loc='best')
roc_auc_score(combined_label, combined_p_calib,
sample_weight=numpy.concatenate([track_weights_data * data.N_sig_sw.values,
track_weights_MC * MC.N_sig_sw.values]))
%%time
tt_check = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=433,
train_features=features + ['label'], group_feature='group_column')
tt_check.fit(combined_data_MC, combined_label,
sample_weight=numpy.concatenate([track_weights_data * data.N_sig_sw.values * mask_sw_positive,
track_weights_MC * MC.N_sig_sw.values]))
roc_auc_score(combined_label, tt_check.predict_proba(combined_data_MC)[:, 1],
sample_weight=numpy.concatenate([track_weights_data * data.N_sig_sw.values * mask_sw_positive,
track_weights_MC * MC.N_sig_sw.values]))
# * sum(track_weights_data * mask_sw_positive) / sum(track_weights_MC)
roc_auc_score(combined_label, tt_check.predict_proba(combined_data_MC)[:, 1],
sample_weight=numpy.concatenate([track_weights_data * data.N_sig_sw.values,
track_weights_MC * MC.N_sig_sw.values]))
# * sum(track_weights_data) / sum(track_weights_MC)
tt_reweighted_MC = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=321,
train_features=features, group_feature='group_column')
%time tt_reweighted_MC.fit(MC, MC.label, sample_weight=track_weights_MC * MC.N_sig_sw.values)
pass
pandas.DataFrame({'dataset': ['MC', 'data'],
'quality': [roc_auc_score(
B_signs_MC,
compute_Bprobs(MC, tt_reweighted_MC.predict_proba(MC)[:, 1],
weights=track_weights_MC, normed_weights=False),
sample_weight=B_weights_MC),
roc_auc_score(
B_signs,
compute_Bprobs(data, tt_reweighted_MC.predict_proba(data)[:, 1],
weights=track_weights_data, normed_weights=False),
sample_weight=B_weights)]})
pandas.DataFrame({'dataset': ['MC', 'data'],
'quality': [roc_auc_score(
B_signs_MC,
compute_Bprobs(MC, tt_reweighted_MC.predict_proba(MC)[:, 1],
weights=track_weights_MC, normed_weights=False),
sample_weight=B_weights_MC),
roc_auc_score(
B_signs,
compute_Bprobs(data, tt_reweighted_MC.predict_proba(data)[:, 1],
weights=track_weights_data, normed_weights=False),
sample_weight=B_weights)]})
%%time
tt_reweighted_data = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=321,
train_features=features, group_feature='group_column')
tt_reweighted_data.fit(data, data.label,
sample_weight=track_weights_data * data.N_sig_sw.values * mask_sw_positive)
pass
pandas.DataFrame({'dataset': ['MC', 'data'],
'quality': [roc_auc_score(
B_signs_MC,
compute_Bprobs(MC, tt_reweighted_data.predict_proba(MC)[:, 1],
weights=track_weights_MC, normed_weights=False),
sample_weight=B_weights_MC),
roc_auc_score(
B_signs,
compute_Bprobs(data, tt_reweighted_data.predict_proba(data)[:, 1],
weights=track_weights_data, normed_weights=False),
sample_weight=B_weights)]})
pandas.DataFrame({'dataset': ['MC', 'data'],
'quality': [roc_auc_score(
B_signs_MC,
compute_Bprobs(MC, tt_reweighted_data.predict_proba(MC)[:, 1],
weights=track_weights_MC, normed_weights=False),
sample_weight=B_weights_MC),
roc_auc_score(
B_signs,
compute_Bprobs(data, tt_reweighted_data.predict_proba(data)[:, 1],
weights=track_weights_data, normed_weights=False),
sample_weight=B_weights)]})
numpy.mean(mc_sum_weights_per_event), numpy.mean(data_sum_weights_per_event)
_, data_ids = numpy.unique(data['group_column'], return_inverse=True)
mc_sum_weights_per_event = numpy.bincount(MC.group_column.values, weights=track_weights_MC)
data_sum_weights_per_event = numpy.bincount(data_ids, weights=track_weights_data)
hist(mc_sum_weights_per_event, bins=60, normed=True, alpha=0.5)
hist(data_sum_weights_per_event, bins=60, normed=True, alpha=0.5, weights=B_weights);
hist(mc_sum_weights_per_event, bins=60, normed=True, alpha=0.5)
hist(data_sum_weights_per_event, bins=60, normed=True, alpha=0.5, weights=B_weights);
hist(numpy.bincount(MC.group_column), bins=81, normed=True, alpha=0.5, range=(0, 80))
hist(numpy.bincount(data.group_column), bins=81, normed=True, alpha=0.5, range=(0, 80));
hist(expit(p_tt_mc) - expit(p_data), bins=60, weights=B_weights, normed=True, label='standard approach',
alpha=0.5);
hist(expit(p_data_w_MC) - expit(p_data_w), bins=60, weights=B_weights, normed=True, label='compensate method',
alpha=0.5);
legend()
xlabel('$p_{MC}-p_{data}$')
from utils import compute_mistag
bins_perc = [10, 20, 30, 40, 50, 60, 70, 80, 90]
compute_mistag(expit(p_data), B_signs, B_weights, chosen=numpy.ones(len(B_signs), dtype=bool),
bins=bins_perc,
uniform=False, label='data')
compute_mistag(expit(p_tt_mc), B_signs, B_weights, chosen=numpy.ones(len(B_signs), dtype=bool),
bins=bins_perc,
uniform=False, label='MC')
compute_mistag(expit(p_data_w), B_signs, B_weights, chosen=numpy.ones(len(B_signs), dtype=bool),
bins=bins_perc,
uniform=False, label='new')
legend(loc='best')
xlim(0.3, 0.5)
ylim(0.2, 0.5)
bins_edg = numpy.linspace(0.3, 0.9, 10)
compute_mistag(expit(p_data), B_signs, B_weights, chosen=numpy.ones(len(B_signs), dtype=bool),
bins=bins_edg,
uniform=True, label='data')
compute_mistag(expit(p_tt_mc), B_signs, B_weights, chosen=numpy.ones(len(B_signs), dtype=bool),
bins=bins_edg,
uniform=True, label='MC')
compute_mistag(expit(p_data_w), B_signs, B_weights, chosen=numpy.ones(len(B_signs), dtype=bool),
bins=bins_edg,
uniform=True, label='new')
legend(loc='best')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import
Step2: Reading initial data
Step3: Data preprocessing
Step4: Define mask for non-B events
Step5: Define features
Step6: Test that B-events similar in MC and data
Step7: Test that number of tracks is independent on Track description
Step8: Define base estimator and B weights, labels
Step9: B probability computation
Step10: Inclusive tagging
Step11: Inclusive tagging
Step12: New method
Step13: train classifier to distinguish data and MC
Step14: quality
Step15: calibrate probabilities (due to reweighting rule where probabilities are used)
Step16: compute MC and data track weights
Step17: reweighting plotting
Step18: Check reweighting rule
Step19: Classifier trained on MC
Step20: Classifier trained on data
Step21:
Step22: Calibration
|
7,111
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
df = pd.read_csv('preparation.csv',delimiter=',')
df.shape
!ls
59400*0.8
dftrain = pd.read_csv('preparation.csv',delimiter=',',nrows=47520)
dfpayment = pd.read_csv('trainingset.csv',delimiter=',',nrows=47520)
dftrain.tail()
dftrain['terrain'] = 'dataran tinggi'
dftrain['terrain'][dftrain['gps_height']<=500] = 'dataran rendah'
dftrain['terrain'][(dftrain['gps_height']>500) & (dftrain['gps_height']<=1000)] = 'dataran sedang'
dftrain['terrain'].replace('dataran rendah',0,inplace=True)
dftrain['terrain'].replace('dataran sedang',1,inplace=True)
dftrain['terrain'].replace('dataran tinggi',2,inplace=True)
dftrain.head()
print dftrain.columns
dftrain['water_availability'] = dftrain.apply(lambda row: 0 if row['amount_tsh']==0.0 else 1,axis=1)
dftrain['payment'] = dfpayment['payment']
dftrain['payment'].unique()
dftrain['payment'].replace('pay annually',1,inplace=True)
dftrain['payment'].replace('never pay',2,inplace=True)
dftrain['payment'].replace('pay per bucket',3,inplace=True)
dftrain['payment'].replace('unknown',0,inplace=True)
dftrain['payment'].replace('pay when scheme fails',4,inplace=True)
dftrain['payment'].replace('other',5,inplace=True)
dftrain['payment'].replace('pay monthly',6,inplace=True)
dftrain2 = dftrain[['id','water_availability','terrain','num_private','region_code','district_code','population','water_quality','quality_group','quantity_group','source_type','source_class','waterpoint_type','wp_age','daysdiff','payment','status_group']].copy()
dftrain2.tail()
dftrain2.to_csv('dftrain.csv',sep=',',header=True,index=False)
dfvalidation = pd.read_csv('preparation.csv',delimiter=',',skiprows=range(1,47520),nrows=11880)
dfv_payment = pd.read_csv('trainingset.csv',delimiter=',',skiprows=range(1,47520),nrows=11880)
print dfvalidation.shape
dfvalidation.tail()
dfvalidation['terrain'] = 'dataran tinggi'
dfvalidation['terrain'][dftrain['gps_height']<=500] = 'dataran rendah'
dfvalidation['terrain'][(dfvalidation['gps_height']>500) & (dfvalidation['gps_height']<=1000)] = 'dataran sedang'
dfvalidation['terrain'].replace('dataran rendah',0,inplace=True)
dfvalidation['terrain'].replace('dataran sedang',1,inplace=True)
dfvalidation['terrain'].replace('dataran tinggi',2,inplace=True)
dfvalidation.head()
dfvalidation['water_availability'] = dfvalidation.apply(lambda row: 0 if row['amount_tsh']==0.0 else 1,axis=1)
dfvalidation['payment'] = dfv_payment['payment']
dfvalidation['payment'].replace('pay annually',1,inplace=True)
dfvalidation['payment'].replace('never pay',2,inplace=True)
dfvalidation['payment'].replace('pay per bucket',3,inplace=True)
dfvalidation['payment'].replace('unknown',0,inplace=True)
dfvalidation['payment'].replace('pay when scheme fails',4,inplace=True)
dfvalidation['payment'].replace('other',5,inplace=True)
dfvalidation['payment'].replace('pay monthly',6,inplace=True)
dfvalidation2 = dfvalidation[['id','water_availability','terrain','num_private','region_code','district_code','population','water_quality','quality_group','quantity_group','source_type','source_class','waterpoint_type','wp_age','daysdiff','payment','status_group']].copy()
dfvalidation2.head()
dfvalidation2.to_csv('validation.csv',sep=',',header=True,index=False)
!ls -l
print "done"
# --- Test-set preparation: apply the same feature engineering as train ---
dftest = pd.read_csv('testset.csv', delimiter=',')
dftest.head()
dftest.isnull().sum().sum()
print(list(dftest.columns))
dftest['source_class'].isnull().sum().sum()
dftest['public_meeting'].fillna('unknown', inplace=True)
dftest['payment'].unique()

# Waterpoint age relative to 2017; vectorised instead of a row-wise apply.
dftest['wp_age'] = 2017 - dftest['construction_year']

# Ordinal-encode the categorical columns. A single dict-based replace per
# column keeps each mapping in one place (same codes as the original chain).
dftest['water_quality'].replace({'soft': 1, 'salty': 2, 'milky': 3, 'fluoride': 4,
                                 'coloured': 5, 'salty abandoned': 6,
                                 'fluoride abandoned': 7, 'unknown': 0}, inplace=True)
dftest['quantity_group'].replace({'enough': 1, 'insufficient': 2, 'dry': 3,
                                  'seasonal': 4, 'unknown': 0}, inplace=True)
dftest['waterpoint_type'].replace({'communal standpipe': 1, 'communal standpipe multiple': 2,
                                   'hand pump': 3, 'other': 0, 'improved spring': 4,
                                   'cattle trough': 5, 'dam': 6}, inplace=True)
dftest['source_type'].replace({'spring': 1, 'rainwater harvesting': 2, 'dam': 3,
                               'borehole': 4, 'other': 0, 'shallow well': 5,
                               'river/lake': 6}, inplace=True)
dftest['quality_group'].replace({'good': 1, 'salty': 2, 'milky': 3, 'unknown': 0,
                                 'fluoride': 4, 'colored': 5}, inplace=True)
dftest['source_class'].replace({'groundwater': 1, 'surface': 2, 'unknown': 0}, inplace=True)

import datetime as dt
# Days elapsed since the record date. today is the same for every row, so
# compute it once instead of once per row with apply (same values, faster).
dftest['today'] = dt.datetime.today().strftime('%Y-%m-%d')
dftest['date_recorded'] = [time.date() for time in dftest['date_recorded'].astype('datetime64[ns]')]
dftest['today'] = [time.date() for time in dftest['today'].astype('datetime64[ns]')]
dftest['lastcheckdays'] = abs(dftest['date_recorded'].sub(dftest['today'], axis=0))
dftest['daysdiff'] = dftest['lastcheckdays'] / np.timedelta64(1, 'D')

# Terrain bucketing; .loc instead of chained indexing (see train/validation).
dftest['terrain'] = 'dataran tinggi'
dftest.loc[dftest['gps_height'] <= 500, 'terrain'] = 'dataran rendah'
dftest.loc[(dftest['gps_height'] > 500) & (dftest['gps_height'] <= 1000), 'terrain'] = 'dataran sedang'
dftest['terrain'].replace({'dataran rendah': 0, 'dataran sedang': 1, 'dataran tinggi': 2}, inplace=True)

# Binary availability flag, vectorised.
dftest['water_availability'] = (dftest['amount_tsh'] != 0.0).astype(int)

# Keep only numeric columns, then the modelling feature subset, and export.
dftest2 = dftest._get_numeric_data()
dftest2.head()
dftest3 = dftest2[['id', 'water_availability', 'terrain', 'num_private', 'region_code', 'district_code', 'population', 'water_quality', 'quality_group', 'quantity_group', 'source_type', 'source_class', 'waterpoint_type', 'wp_age', 'daysdiff']].copy()
dftest3.head()
dftest3.to_csv('testsetfix.csv', sep=',', header=True, index=False)

# Re-load the export, attach the encoded payment column, and write it again.
dftest3 = pd.read_csv('testsetfix.csv', delimiter=',')
dftest3['payment'] = dftest['payment']
dftest3['payment'].unique()
dftest3['payment'].replace({'pay annually': 1, 'never pay': 2, 'pay per bucket': 3,
                            'unknown': 0, 'pay when scheme fails': 4, 'other': 5,
                            'pay monthly': 6}, inplace=True)
dftest3 = dftest3[['id', 'water_availability', 'terrain', 'num_private', 'region_code', 'district_code', 'population', 'water_quality', 'quality_group', 'quantity_group', 'source_type', 'source_class', 'waterpoint_type', 'wp_age', 'daysdiff', 'payment']].copy()
dftest3.head()
dftest3.to_csv('testsetfix.csv', sep=',', header=True, index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: take 80% as train dataframe, and leave 20% as a validator/to check the accuracy of our prediction before applying the model to the real test data.
Step2: <h3>#prepare the training dataset for export</h3>
Step3: <h3>#prepare the validation dataset</h3>
Step4: <h3>#preparation of test dataset</h3>
|
7,112
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import Hodograph, SkewT
from metpy.units import units

# Parse the fixed-width sounding file, keeping pressure, height, T, Td and
# the two wind columns.
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('nov11_sounding.txt', as_file_obj=False),
                 skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed'
                       ), how='all').reset_index(drop=True)
# We will pull the data out of the example dataset into individual variables and
# assign units (MetPy calculations require unit-aware arrays).
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.wind_components(wind_speed, wind_dir)
# Calculate the LCL (lifted condensation level) from the surface observation.
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
print(lcl_pressure, lcl_temperature)
# Calculate the parcel profile (temperature of a surface parcel lifted dry-
# then moist-adiabatically through the sounding).
parcel_prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')

# --- Basic Skew-T ---
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r', linewidth=2)
skew.plot(p, Td, 'g', linewidth=2)
skew.plot_barbs(p, u, v)
# Show the plot
plt.show()

# --- Advanced Skew-T: LCL, parcel profile, CAPE/CIN shading ---
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Plot LCL temperature as black dot
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Plot the parcel profile as a black line
skew.plot(p, parcel_prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, parcel_prof, Td)
skew.shade_cape(p, T, parcel_prof)
# Plot a zero degree isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Show the plot
plt.show()

# --- Same plot with a hodograph inset ---
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Plot LCL as black dot
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Plot the parcel profile as a black line
skew.plot(p, parcel_prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, parcel_prof, Td)
skew.shade_cape(p, T, parcel_prof)
# Plot a zero degree isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Create a hodograph
# Create an inset axes object that is 40% width and height of the
# figure and put it in the upper right hand corner.
ax_hod = inset_axes(skew.ax, '40%', '40%', loc=1)
h = Hodograph(ax_hod, component_range=80.)
h.add_grid(increment=20)
h.plot_colormapped(u, v, wind_speed)  # Plot a line colored by wind speed
# Show the plot
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting Data
Step2: Thermodynamic Calculations
Step3: Basic Skew-T Plotting
Step4: Advanced Skew-T Plotting
Step5: Adding a Hodograph
|
7,113
|
<ASSISTANT_TASK:>
Python Code:
print("The answer should be three: " + str(1+2))
!nvidia-smi
#imports
import h5py
import pandas as pd
import numpy as np
import pprint as pp
import tensorflow as tf
from tensorflow.contrib import rnn
import math
import matplotlib.pyplot as plt
import warnings
import prepareData as prepData
# The data is prepared and stored in a seperate .h5 file.
# Set usePreparedData = False to use the original data and run the data preparation code
usePreparedData = True
# insampleCutoffTimestamp variable is used to split the data in time into two pieces to create training and test set.
insampleCutoffTimestamp = 1650
# If usePreparatedData is True, then the prepared data is stored. Otherwise, the original data is stored
if usePreparedData == True:
#with pd.HDFStore("/home/mimas/2sigma/DLI_FSI/2sigma/train_prepared.h5", 'r') as train:
with pd.HDFStore("2sigma/trainDataPrepared.h5", 'r') as train:
df = train.get("train")
else:
with pd.HDFStore("2sigma/train.h5", 'r') as train:
df = train.get("train")
# This will print the dataset
df
if usePreparedData == False:
# Original data is not clean and some the samples are a bit extreme.
# These values are removed from the feature set.
df = prepData.removeExtremeValues(df, insampleCutoffTimestamp)
# A little bit feature engineering. Hand-crafted features are created here to boost the accuracy.
df = prepData.createNewFeatures(df)
# Check whether ve still have any NaNs
df = prepData.fillNaNs(df)
df.to_hdf("2sigma/trainDataPrepared.h5", 'train')
def weight_variable(shape):
    """Return a trainable weight Variable drawn from a truncated normal (stddev=0.3)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.3))
def bias_variable(shape):
    """Return a trainable bias Variable drawn from a truncated normal (stddev=0.3)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.3))
# Number of time steps the LSTM is unrolled over for each minibatch.
n_time_steps = 10

def getDNN (x, LSTMCellSize, keep_prob):
    """Build the TF1 graph: a dropout-wrapped LSTM followed by one linear
    output unit applied to the last time step.

    x: float32 placeholder of shape [batch, n_time_steps, n_features].
    LSTMCellSize: number of units in the LSTM cell.
    keep_prob: scalar placeholder for the dropout keep probability.
    Returns the prediction tensor of shape [batch, 1].
    """
    with tf.name_scope('model'):
        with tf.name_scope('RNN'):
            # LSTM cell with tanh activation; dropout is applied to its output.
            cell = rnn.DropoutWrapper(rnn.BasicLSTMCell(LSTMCellSize, forget_bias=2, activation=tf.nn.tanh), output_keep_prob=keep_prob)
            # Unroll the cell over the time dimension of x.
            # outputs has shape [batch, time, LSTMCellSize].
            outputs, states = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
            outputs_shape = outputs.get_shape().as_list()
        # Final fully-connected layer: linear projection of the LSTM output
        # at the last time step down to a single value.
        with tf.name_scope('W_fc1'):
            W_fc1 = weight_variable([LSTMCellSize, 1])
        with tf.name_scope('b_fc1'):
            b_fc1 = bias_variable([1])
        with tf.name_scope('pred'):
            pred = tf.matmul(outputs[:,-1,:], W_fc1) + b_fc1
    return pred
# The column names that will be included in the featureset are added into colList.
# colList will be used throughout the lab.
colList=[]
for thisColumn in df.columns:
    if thisColumn not in ('id', 'timestamp', 'y', 'CntNs', 'y_lagged'):
        colList.append(thisColumn)
colList.append('y_lagged')

# If you do not reset the default graph you will need to restart the kernel
# every time this notebook is run.
tf.reset_default_graph()

# Network Parameters
# Number of units in the LSTM cell (one per input feature here).
n_LSTMCell = len(colList)

# Placeholder for the input and the keep probability for the dropout layers.
with tf.name_scope('input'):
    x= tf.placeholder(tf.float32, shape=[None, n_time_steps, len(colList)])
with tf.name_scope('keep_prob'):
    keep_prob = tf.placeholder(tf.float32)

print('Building tensorflow graph')
# Graph construction for the LSTM-based deep neural network (see getDNN).
pred = getDNN (x, n_LSTMCell, keep_prob)

# Placeholder for the output (label).
with tf.name_scope('label'):
    y = tf.placeholder(tf.float32, shape=[None, 1])
# Placeholder to be able to split each minibatch into training and test rows
# while training the network.
inSampleCutoff = tf.placeholder(tf.int32, shape = ())

# This is important - we only want to train on the in-sample set of rows using TensorFlow.
y_inSample = y[0:inSampleCutoff]
pred_inSample = pred[0:inSampleCutoff]
# Also extract out-of-sample predictions and actual values;
# we'll use them for evaluation while training the model.
y_outOfSample = y[inSampleCutoff:]
pred_outOfSample = pred[inSampleCutoff:]

with tf.name_scope('stats'):
    # Pearson correlation (in-sample) to evaluate the model.
    covariance = tf.reduce_sum(tf.matmul(tf.transpose(tf.subtract(pred_inSample, tf.reduce_mean(pred_inSample))),tf.subtract(y_inSample, tf.reduce_mean(y_inSample))))
    var_pred = tf.reduce_sum(tf.square(tf.subtract(pred_inSample, tf.reduce_mean(pred_inSample))))
    var_y = tf.reduce_sum(tf.square(tf.subtract(y_inSample, tf.reduce_mean(y_inSample))))
    pearson_corr = covariance / tf.sqrt(var_pred * var_y)
    tf.summary.scalar("pearson_corr", pearson_corr)

# The in-sample (training-period) slice of the dataframe.
dfInSample = df[df.timestamp < insampleCutoffTimestamp]
# Create a reference dataframe (that only depends on in-sample data)
# that gives us standard deviation and mean information on a per-id basis.
# We'll use it later for variance stabilization.
meanStdById = dfInSample.groupby(['id']).agg( {'y':['mean', 'std']})

# Training parameters
display_step = 100                                   # diagnostics frequency
epoch = 1                                            # number of passes over the ids
pre_trained_model = 'SavedModels/model_epoch_10.ckpt'  # checkpoint restored before training
mini_batch_limit = 1300                              # minimum rows per id minibatch

# Set up adaptive learning rate:
globalStep = tf.placeholder(tf.float32)
# Ratio of globalStep / totalDecaySteps indicates how far we've progressed in
# training: 0 at the beginning and 1 at the end. adaptiveLearningRate thus
# changes from the starting rate to rate * decay_rate. To simplify the code,
# totalDecaySteps is fixed at 1 and globalStep is passed as a fraction in [0,1].
# Learning rate should be set to 0.002 if training from scratch,
# 0.00058 with the 10-epoch pre-trained network, 0.00061 with 15 epochs.
adaptiveLearningRate = tf.train.exponential_decay(
    0.00058,     # Start with this learning rate
    globalStep,  # globalStep / totalDecaySteps shows how far we've progressed in training
    1,           # totalDecaySteps
    0.3)         # decay_rate: factor applied to the starting rate by the end of training

# Define loss and optimizer.
# Note the loss only involves in-sample rows.
# L2 regularization is added in the loss function to avoid over-fitting.
rnn_variables = lstm_variables = [v for v in tf.trainable_variables()
                                  if v.name.startswith('rnn')]
with tf.name_scope('loss'):
    loss = tf.nn.l2_loss(tf.subtract(y_inSample,pred_inSample)) + tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(scale=0.0001), tf.trainable_variables())
    tf.summary.scalar("loss", loss)
optimizer = tf.train.AdamOptimizer(learning_rate=adaptiveLearningRate).minimize (loss)

# Unique instrument ids; training iterates over ids one at a time.
ids = df.id.unique()
ids.sort()
summary_op = tf.summary.merge_all()
# Initialize the variables.
init = tf.global_variables_initializer()
# Accumulators for out-of-sample results across all epochs/ids.
totalActual = []
totalPredicted = []
import random
# Launch the graph.
# Train per-id, preserving temporal structure within each id: rows before
# insampleCutoffTimestamp are trained on, later rows are evaluated out-of-sample.
with tf.Session() as sess:
    # Global variables are initialized.
    sess.run(init)
    # Restore the latest checkpoint (pre-trained network).
    model_saver = tf.train.Saver()
    model_saver.restore(sess, pre_trained_model)
    writer = tf.summary.FileWriter("logs", graph=tf.get_default_graph())
    step = 50
    writer_step = 1;
    for i in range(epoch):
        print('Epoch: ', i, '******************************')
        actual = []
        predicted = []
        random.shuffle(ids)
        for thisId in ids:
            # Getting the data of the current id, time-ordered.
            this_df = df[df.id == thisId].copy()
            this_df = this_df.sort_values(['id', 'timestamp'])
            # We need to pass the in-sample row count to the graph;
            # optimization will only consider the training rows.
            inSampleSize, _ = this_df[this_df.timestamp < insampleCutoffTimestamp].shape
            totalRows, _ = this_df.shape
            batch_y = this_df.loc[:,'y'].values
            batch_x = this_df[colList].values
            # Not enough rows to form even one unrolled window.
            if totalRows < n_time_steps:
                continue
            # Data is formatted as a 3D tensor with shape (batch, n_time, n_feature)
            # for the LSTM; n_time_steps determines how far the LSTM unrolls in time.
            complete_x = np.zeros([totalRows-n_time_steps+1, n_time_steps, len(colList)])
            for n in range(n_time_steps):
                complete_x[:,n,:]=batch_x[n:totalRows-n_time_steps+n+1,:]
            # Each window predicts the label at its last time step.
            batch_y = batch_y[n_time_steps-1:]
            inSampleSize -= n_time_steps - 1
            # Variance stabilizing transform: some ids have (almost) no
            # in-sample rows; we cannot transform or train on those, so skip.
            if inSampleSize < 10:
                continue
            # Perform variance stabilization using in-sample per-id statistics.
            thisMean = meanStdById.loc[thisId][0]
            thisStd = meanStdById.loc[thisId][1]
            batch_y = (batch_y - thisMean) / thisStd
            batch_y = batch_y.reshape(-1,1)
            minibatchSize, _ = batch_y.shape
            # We want to make sure the RNN reaches steady state, so require a
            # minimum minibatch length.
            if minibatchSize < mini_batch_limit:
                continue
            # Run optimization.
            # Note: keep_prob is set to 0.5 for training only!
            _, currentRate = sess.run([optimizer, adaptiveLearningRate], feed_dict={x: complete_x, y: batch_y, keep_prob:0.5, inSampleCutoff:inSampleSize, globalStep:i/epoch})
            # Obtain out-of-sample target values and our predictions (no dropout).
            y_oos, pred_oos = sess.run([y_outOfSample, pred_outOfSample], feed_dict={x: complete_x, y: batch_y, keep_prob:1.0, inSampleCutoff:inSampleSize})
            # Flatten the returned lists.
            y_oos = [y for x in y_oos for y in x]
            pred_oos = [y for x in pred_oos for y in x]
            # Reverse the variance-stabilizing transform before recording.
            if inSampleSize:
                y_oos = [ (t*thisStd + thisMean) for t in y_oos]
                pred_oos = [ (t*thisStd + thisMean) for t in pred_oos]
            # Record the results (per-epoch and overall accumulators).
            actual.extend(y_oos)
            predicted.extend(pred_oos)
            totalActual.extend(y_oos)
            totalPredicted.extend(pred_oos)
            # Once every display_step show some diagnostics - the loss
            # function, in-sample correlation, etc.
            if step % display_step == 0:
                # Calculate batch correlation and loss (no dropout).
                correl, lossResult, summary = sess.run([pearson_corr, loss, summary_op], feed_dict={x: complete_x, y: batch_y, keep_prob:1.0, inSampleCutoff:inSampleSize})
                writer.add_summary(summary, writer_step)
                writer_step += 1
                # corrcoef sometimes fails for a perfectly valid reason
                # (e.g. stdev(pred_oos) is 0); it returns nan but also emits
                # an annoying warning, which the following suppresses.
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    correl_oos = np.corrcoef(y_oos, pred_oos)[0,1]
                print('LR: %s - Iter %s, minibatch loss = %s, minibatch corr = %s, oos %s (%s/%s)' % (currentRate, step, lossResult, correl, correl_oos, inSampleSize, totalRows))
            step += 1
    print('Optimization Finished!')
    print('Correl: ', np.corrcoef(actual, predicted)[0,1])
! pwd
# Use the accumulated out-of-sample values from all epochs.
actual = totalActual
predicted = totalPredicted
actualMeanReturn = []
predictedMeanReturn = []
stdActualReturns = []
# Buckets are created over the predicted-return range.
buckets = np.arange(-0.02,0.02,0.002)
actual = np.array(actual)
predicted = np.array(predicted)
# Predicted values and the actual values are placed into buckets;
# for each bucket record mean predicted, mean actual, and actual std.
# (Warnings from empty buckets producing nan means are suppressed.)
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    for i in range(len(buckets)-1):
        index = np.logical_and(predicted>buckets[i], predicted<buckets[i+1])
        thisBucket = actual[index].mean()
        actualMeanReturn.append(thisBucket)
        predictedMeanReturn.append(predicted[index].mean())
        stdActualReturns.append(actual[index].std())
# Actual versus predicted values are plotted.
plt.figure()
plt.plot(predictedMeanReturn,actualMeanReturn, marker='*')
plt.xlabel('predicted')
plt.ylabel('actual')
plt.grid(True)
plt.show()
# Same plot with error bars showing the spread of actual returns per bucket.
plt.figure()
plt.errorbar(predictedMeanReturn, actualMeanReturn, yerr = stdActualReturns, marker='*')
plt.xlabel('predicted')
plt.ylabel('actual')
plt.grid(True)
plt.show()
# Package the notebook outputs for download (shell magics).
! tar -cvf output3.zip * --exclude="2sigma"
!tar -cvf output.zip main.ipynb prepareData.py dnn.jpg data.jpg data_split.jpg rnn.jpg
! ls
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's execute the cell below to display information about the GPUs running on the server.
Step2: 2. Lab Overview
Step3: Data Preparation
Step4: There are multiple instruments in the dataset and each instrument has an id. Time is represented by the 'timestamp' feature. Let's look at the data.
Step5: If the original data is stored, the data preparation code will be executed in the following cell. First, extreme values in each feature set are removed. Then, some hand-crafted features are added to feature set to boost the prediction accuracy. There are many methods including PCA and auto-encoders to do the feature engineering rather than creating hand-crafted features. As an exercise, we highly recommend you to add auto-encoders to the code and check the accuracy after the lab. Lastly, NaNs are replaced with the median of the feature.
Step6: Model Construction
Step7: Training and Testing
Step8: In most of the traditional machine learning and deep learning methods, it is assumed that the feature set and predicted value have zero mean and unit variance gaussian distribution. Empirical studies show that the financial data such as asset returns is often not compatible with this assumption. That is why we normalize the "y" variable by subtracting its mean and dividing the result by the standard deviation in the following cell. As an exercise, you can also normalize the features and see if you improve the accuracy.
Step9: We are ready to launch the graph for training the model and see intermediate diagnostics results and the final result. We defined the important hyperparameters including the epoch, training batch size and learning rate at the top of the cell. Initially, the epoch is set to 1 because it takes 15-20 minutes to complete the training with 10 epochs even though we are using GPUs. In order to speed up the training in the lab environment, we provided pre-trained networks with 10 epochs and 20 epochs. An adaptive learning rate starting from 0.002 with exponential decay is used for the training from scratch. Learning rate should be set to 0.00058 and 0.00061 for using pre-trained models with 10 and 15 epochs respectively.
Step10: It takes 3-5 minutes to complete the training with 1 epochs. We also provided TensorBoard to review the model architecture, loss and correlation variables. TensorBoard is a suite of web applications for inspecting and understanding your TensorFlow runs and graphs.
Step11: How much variance is there?
|
7,114
|
<ASSISTANT_TASK:>
Python Code:
# Loads the training and test data sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
first_image = X_train[0, :, :]
# Display the first training digit as a 28x28 grayscale image.
plt.imshow(first_image, cmap=plt.cm.Greys);
# Number of distinct digit classes (10).
num_classes = len(np.unique(y_train))
num_classes
# 60K training 28 x 28 (pixel) images
X_train.shape
# 10K test 28 x 28 (pixel) images
X_test.shape
# Flattened input dimension: 28 * 28 = 784 features per image.
input_dim = np.prod(X_train.shape[1:])
input_dim
# The training and test data sets are integers, ranging from 0 to 255.
# We reshape the training and test data sets to be matrices with 784 (= 28 * 28) features.
X_train = X_train.reshape(60000, input_dim).astype('float32')
X_test = X_test.reshape(10000, input_dim).astype('float32')
# Scales the training and test data to range between 0 and 1.
max_value = X_train.max()
X_train /= max_value
X_test /= max_value
# The training and test labels are integers from 0 to 9 indicating the class label
(y_train, y_test)
# We convert the class labels to binary class matrices (one-hot encoding)
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
# Multilayer perceptron: two 512-unit ReLU layers with dropout, softmax output.
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(input_dim,)))
model.add(Dropout(0.3))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
# Different ways to summarize the model: text summary, graph SVG, JSON config.
model.summary()
SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))
import json
json.loads(model.to_json())
# Trains the model, iterating on the training data in batches of 32 in 3 epochs.
# Using the Adam optimizer.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=32, epochs=3, verbose=1)
# Test accuracy is ~98%.
model.evaluate(X_test, y_test)
# Predict a couple of held-out images, displaying each first.
first_test_image = X_test[0, :]
plt.imshow(first_test_image.reshape(28, 28), cmap=plt.cm.Greys);
second_test_image = X_test[1, :]
plt.imshow(second_test_image.reshape(28, 28), cmap=plt.cm.Greys);
model.predict_classes(X_test[[0, 1], :])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Multilayer Perceptron
Step2: Different Ways to Summarize Model
Step3: Train Classifier
Step4: Model Evaluation
Step5: Predicting a Couple of Held-Out Images
|
7,115
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier

# Load the iris dataset and create a KNN classifier (default k=5).
iris = load_iris()
X, y = iris.data, iris.target
classifier = KNeighborsClassifier()
y
from sklearn.model_selection import train_test_split
# Plain (unstratified) 50/50 train/test split.
train_X, test_X, train_y, test_y = train_test_split(X, y,
                                                    train_size=0.5,
                                                    test_size=0.5,
                                                    random_state=123)
print("Etiquetas para los datos de entrenamiento y test")
print(train_y)
print(test_y)
# Class proportions (%) overall vs. in each split — unstratified splits drift.
print('Todos:', np.bincount(y) / float(len(y)) * 100.0)
print('Entrenamiento:', np.bincount(train_y) / float(len(train_y)) * 100.0)
print('Test:', np.bincount(test_y) / float(len(test_y)) * 100.0)
# Stratified split: pass the labels via stratify= to preserve class proportions.
train_X, test_X, train_y, test_y = train_test_split(X, y,
                                                    train_size=0.5,
                                                    test_size=0.5,
                                                    random_state=123,
                                                    stratify=y)
print('Todos:', np.bincount(y) / float(len(y)) * 100.0)
print('Entrenamiento:', np.bincount(train_y) / float(len(train_y)) * 100.0)
print('Test:', np.bincount(test_y) / float(len(test_y)) * 100.0)
# Fit on the training half, evaluate accuracy on the held-out half.
classifier.fit(train_X, train_y)
pred_y = classifier.predict(test_X)
print("CCR [Accuracy]:")
print(np.mean(pred_y == test_y))
print('Ejemplos correctamente clasificados:')
correct_idx = np.where(pred_y == test_y)[0]
print(correct_idx)
print('\nEjemplos incorrectamente clasificados:')
incorrect_idx = np.where(pred_y != test_y)[0]
print(incorrect_idx)
# Plot the test set in 2D (sepal width vs. petal length), one color per class;
# misclassified points are overdrawn in dark red.
colors = ["darkblue", "darkgreen", "gray"]
for n, color in enumerate(colors):
    idx = np.where(test_y == n)[0]
    plt.scatter(test_X[idx, 1], test_X[idx, 2], color=color, label="Clase %s" % str(n))
plt.scatter(test_X[incorrect_idx, 1], test_X[incorrect_idx, 2], color="darkred")
plt.xlabel('sepal width [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc=3)
plt.title("Resultados de clasificación en iris con KNN")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Si pensamos la forma en que normalmente se aplica el aprendizaje automático, la idea de una partición de entrenamiento y test tiene sentido. Los sistemas del mundo real se entrenan utilizando los datos de los que se dispone y, conforme otros datos llegan (de nuevos clientes, de otros sensores o de otras fuentes), el modelo que fue previamente entrenado debe predecir nuevos datos. Podemos simular esto durante el aprendizaje mediante una partición train/test -- los datos de test serán una simulación de "datos futuros" que vendrán al sistema en la etapa de producción.
Step2: Consejo
Step3: Para conseguir realizar una partición estratificada, tenemos que incluir el array de etiquetas cuando invocamos a la función train_test_split
Step4: Si evaluamos el rendimiento de nuestro clasificador con datos que se han empleado para el entrenamiento, podríamos llegar a unos resultados demasiado optimistas. En el peor caso, el modelo puede simplemente memorizar los datos de entrenamiento, pero fallar estrepitosamente cuando tenga que clasificar nuevos datos similares - nunca querríamos tener un sistema así en producción.
Step5: Podemos visualizar los aciertos y los fallos
|
7,116
|
<ASSISTANT_TASK:>
Python Code:
from chemspipy import ChemSpider
# Tip: Store your security token as an environment variable to reduce the chance of accidentally sharing it
import os
mytoken = os.environ['CHEMSPIDER_SECURITY_TOKEN']
# All interaction with the ChemSpider database goes through this client object.
cs = ChemSpider(security_token=mytoken)
# Fetch one compound by its ChemSpider ID (2157).
comp = cs.get_compound(2157)
comp
# Identifiers and calculated properties exposed by the Compound object.
print(comp.molecular_formula)
print(comp.molecular_weight)
print(comp.smiles)
print(comp.common_name)
# Name-based search; iterating the result yields matching compounds.
for result in cs.search('glucose'):
print(result)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then connect to ChemSpider by creating a ChemSpider instance using your security token
Step2: All your interaction with the ChemSpider database should now happen through this ChemSpider object, cs.
Step3: Now we have a Compound object called comp. We can get various identifiers and calculated properties from this object
Step4: Search for a name
|
7,117
|
<ASSISTANT_TASK:>
Python Code:
from google.cloud import aiplatform
REGION = "us-central1"
# IPython shell-capture: resolve the active GCP project id.
PROJECT = !(gcloud config get-value project)
PROJECT = PROJECT[0]
# Set `PATH` to include the directory containing KFP CLI
PATH = %env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
# The cell below is written out as the pipeline module compiled later by dsl-compile-v2.
%%writefile ./pipeline_vertex/pipeline_vertex_automl.py
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# NOTE(review): the next line was a module docstring; its quotes were lost during extraction.
Kubeflow Covertype Pipeline.
import os
from google_cloud_pipeline_components.aiplatform import (
AutoMLTabularTrainingJobRunOp,
EndpointCreateOp,
ModelDeployOp,
TabularDatasetCreateOp,
)
from kfp.v2 import dsl
# Pipeline configuration is injected through environment variables (set below via %env).
PIPELINE_ROOT = os.getenv("PIPELINE_ROOT")
PROJECT = os.getenv("PROJECT")
DATASET_SOURCE = os.getenv("DATASET_SOURCE")
PIPELINE_NAME = os.getenv("PIPELINE_NAME", "covertype")
DISPLAY_NAME = os.getenv("MODEL_DISPLAY_NAME", PIPELINE_NAME)
TARGET_COLUMN = os.getenv("TARGET_COLUMN", "Cover_Type")
SERVING_MACHINE_TYPE = os.getenv("SERVING_MACHINE_TYPE", "n1-standard-16")
@dsl.pipeline(
name=f"{PIPELINE_NAME}-vertex-automl-pipeline",
description=f"AutoML Vertex Pipeline for {PIPELINE_NAME}",
pipeline_root=PIPELINE_ROOT,
)
def create_pipeline():
# Dataset -> AutoML training -> endpoint -> deployment; each step's
# outputs feed the next through the KFP task graph.
dataset_create_task = TabularDatasetCreateOp(
display_name=DISPLAY_NAME,
bq_source=DATASET_SOURCE,
project=PROJECT,
)
automl_training_task = AutoMLTabularTrainingJobRunOp(
project=PROJECT,
display_name=DISPLAY_NAME,
optimization_prediction_type="classification",
dataset=dataset_create_task.outputs["dataset"],
target_column=TARGET_COLUMN,
)
endpoint_create_task = EndpointCreateOp(
project=PROJECT,
display_name=DISPLAY_NAME,
)
model_deploy_task = ModelDeployOp( # pylint: disable=unused-variable
model=automl_training_task.outputs["model"],
endpoint=endpoint_create_task.outputs["endpoint"],
deployed_model_display_name=DISPLAY_NAME,
dedicated_resources_machine_type=SERVING_MACHINE_TYPE,
dedicated_resources_min_replica_count=1,
dedicated_resources_max_replica_count=1,
)
# GCS location for pipeline artifacts and the BigQuery training source table.
ARTIFACT_STORE = f"gs://{PROJECT}-kfp-artifact-store"
PIPELINE_ROOT = f"{ARTIFACT_STORE}/pipeline"
DATASET_SOURCE = f"bq://{PROJECT}.covertype_dataset.covertype"
%env PIPELINE_ROOT={PIPELINE_ROOT}
%env PROJECT={PROJECT}
%env REGION={REGION}
%env DATASET_SOURCE={DATASET_SOURCE}
# Create the artifact-store bucket only if it does not already exist.
!gsutil ls | grep ^{ARTIFACT_STORE}/$ || gsutil mb -l {REGION} {ARTIFACT_STORE}
PIPELINE_JSON = "covertype_automl_vertex_pipeline.json"
# Compile the Python pipeline definition into a JSON pipeline spec.
!dsl-compile-v2 --py pipeline_vertex/pipeline_vertex_automl.py --output $PIPELINE_JSON
!head {PIPELINE_JSON}
# Submit the compiled pipeline to Vertex AI Pipelines (cached steps are reused).
aiplatform.init(project=PROJECT, location=REGION)
pipeline = aiplatform.PipelineJob(
display_name="automl_covertype_kfp_pipeline",
template_path=PIPELINE_JSON,
enable_caching=True,
)
pipeline.run()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Understanding the pipeline design
Step3: Compile the pipeline
Step4: Let us make sure that the ARTIFACT_STORE has been created, and let us create it if not
Step5: Use the CLI compiler to compile the pipeline
Step6: Note
Step7: Deploy the pipeline package
|
7,118
|
<ASSISTANT_TASK:>
Python Code:
def maxProfit(a, b, n):
    """Return the best total from taking a prefix of ``a`` plus the
    complementary suffix of ``b``.

    For every split point i in [0, n] the candidate value is
    sum(a[:i]) + sum(b[i:]); the maximum over all splits is returned.
    Running prefix/suffix sums make this O(n) instead of the original
    O(n^2) re-summation; slicing past the end of a list contributes 0,
    matching the original slice semantics.

    Args:
        a: values counted while i elements are taken from the front.
        b: values counted for the remaining suffix.
        n: largest split index to consider (inclusive).

    Returns:
        Maximum combined sum over all split points; -1 only if n < 0.
    """
    best = -1
    prefix_a = 0       # sum(a[:i]) for the current i
    suffix_b = sum(b)  # sum(b[i:]) for the current i
    for i in range(n + 1):
        best = max(best, prefix_a + suffix_b)
        # Move element i from b's suffix into a's prefix for the next split.
        if i < len(a):
            prefix_a += a[i]
        if i < len(b):
            suffix_b -= b[i]
    return best


if __name__ == "__main__":  # fixed: original compared against "__main __" (stray space)
    a = [2, 3, 2]
    b = [10, 30, 40]
    print(maxProfit(a, b, 4))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,119
|
<ASSISTANT_TASK:>
Python Code:
from obspy import UTCDateTime
from obspy.clients.fdsn import Client as FDSN_Client
from obspy import read_inventory
# FDSN web-service client for the GeoNet data centre.
client = FDSN_Client("GEONET")
# Stations within 0.5 degrees of the given lat/lon, active during the time window.
inventory = client.get_stations(latitude=-42.693,longitude=173.022,maxradius=0.5, starttime = "2016-11-13 11:05:00.000",endtime = "2016-11-14 11:00:00.000")
print(inventory)
_=inventory.plot(projection="local")
# Re-query a single station ("KUZ") with full response-level metadata.
inventory = client.get_stations(station="KUZ",level="response",
starttime = "2016-11-13 11:05:00.000",endtime = "2016-11-14 11:00:00.000")
print(inventory)
# Drill down the hierarchy: inventory -> network -> station -> channel.
network = inventory[0]
station = network[0] # equivalent to inventory[0][0]
num_channels = len(station)
print(station)
channel = station[0] # equivalent to inventory[0][0][0]
print(channel)
# Instrument response of the channel; plot from 0.001 Hz with velocity output.
resp = channel.response
print(resp)
resp.plot(0.001,output="VEL",label='KUZ HHZ')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define GeoNet FDSN client
Step2: Accessing Station Metadata
Step3: The following examples dive into retrieving different information from the inventory object. This object is based on FDSN stationXML and therefore can provide much the same information.
Step4: Now, we can look at more information, such as specifics about the station. Such as the time it opened and location.
Step5: We can drill down even futher into a particular channel and look at the time it was operating for, whether it was continously recording, the sample rate and some basic sensor information.
Step6: This channel states that there is response information available, so we can look at a summary of the response and plot it.
|
7,120
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
%matplotlib inline
# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
# Example of a picture
index = 25
plt.imshow(train_set_x_orig[index])
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
# Exercise placeholders: m_train/m_test are the number of train/test examples,
# num_px the image height/width (images are num_px x num_px x 3).
### START CODE HERE ### (≈ 3 lines of code)
m_train = None
m_test = None
num_px = None
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
# Reshape the training and test examples
# Target layout: one flattened image per column, shape (num_px*num_px*3, m).
### START CODE HERE ### (≈ 2 lines of code)
train_set_x_flatten = None
test_set_x_flatten = None
### END CODE HERE ###
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
# Standardize pixel values from [0, 255] to [0, 1].
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
# GRADED FUNCTION: sigmoid

def sigmoid(z):
    """Compute the sigmoid of z.

    Arguments:
    z -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(z) = 1 / (1 + exp(-z)), applied element-wise.
    """
    # np.exp broadcasts, so this handles scalars and arrays identically.
    s = 1.0 / (1.0 + np.exp(-z))
    return s

print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
# GRADED FUNCTION: initialize_with_zeros

def initialize_with_zeros(dim):
    """Create a vector of zeros of shape (dim, 1) for w and initialize b to 0.

    Argument:
    dim -- size of the w vector we want (or number of parameters in this case)

    Returns:
    w -- initialized vector of shape (dim, 1)
    b -- initialized scalar (corresponds to the bias)
    """
    w = np.zeros((dim, 1))
    b = 0.0
    # Sanity checks on the produced shapes/types (kept from the exercise).
    assert(w.shape == (dim, 1))
    assert(isinstance(b, float) or isinstance(b, int))
    return w, b

dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
# GRADED FUNCTION: propagate

def propagate(w, b, X, Y):
    """Compute the logistic-regression cost and its gradients.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (0 if non-cat, 1 if cat) of size (1, number of examples)

    Returns:
    grads -- dict with "dw" (same shape as w) and "db" (scalar)
    cost -- negative log-likelihood cost for logistic regression
    """
    m = X.shape[1]

    # FORWARD PROPAGATION (FROM X TO COST)
    # Activation A = sigmoid(w.T X + b), shape (1, m); sigmoid inlined via np.exp.
    A = 1.0 / (1.0 + np.exp(-(np.dot(w.T, X) + b)))
    cost = -np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)) / m

    # BACKWARD PROPAGATION (TO FIND GRAD)
    dw = np.dot(X, (A - Y).T) / m
    db = np.sum(A - Y) / m

    assert(dw.shape == w.shape)
    assert(db.dtype == float)
    cost = np.squeeze(cost)
    assert(cost.shape == ())

    grads = {"dw": dw,
             "db": db}
    return grads, cost

w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
# GRADED FUNCTION: optimize

def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    """Optimize w and b by running a gradient descent algorithm.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (0 if non-cat, 1 if cat), of shape (1, number of examples)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- True to print the loss every 100 steps

    Returns:
    params -- dictionary containing the weights w and bias b
    grads -- dictionary containing the last gradients of w and b
    costs -- costs recorded every 100 iterations (for learning-curve plots)
    """
    costs = []

    for i in range(num_iterations):
        # 1) Cost and gradient for the current parameters.
        grads, cost = propagate(w, b, X, Y)
        dw = grads["dw"]
        db = grads["db"]

        # 2) Gradient-descent update: step against the gradient.
        w = w - learning_rate * dw
        b = b - learning_rate * db

        # Record the cost every 100 iterations.
        if i % 100 == 0:
            costs.append(cost)
        if print_cost and i % 100 == 0:
            print ("Cost after iteration %i: %f" %(i, cost))

    params = {"w": w,
              "b": b}
    grads = {"dw": dw,
             "db": db}
    return params, grads, costs

params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
# GRADED FUNCTION: predict

def predict(w, b, X):
    '''
    Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)

    Returns:
    Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
    '''
    m = X.shape[1]
    Y_prediction = np.zeros((1,m))
    w = w.reshape(X.shape[0], 1)

    # Probability vector A = sigmoid(w.T X + b), shape (1, m); sigmoid inlined.
    A = 1.0 / (1.0 + np.exp(-(np.dot(w.T, X) + b)))

    for i in range(A.shape[1]):
        # Threshold the probability at 0.5 to get a hard 0/1 prediction.
        if A[0, i] > 0.5:
            Y_prediction[0, i] = 1
        else:
            Y_prediction[0, i] = 0

    assert(Y_prediction.shape == (1, m))
    return Y_prediction
print ("predictions = " + str(predict(w, b, X)))

# GRADED FUNCTION: model

def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    """Build the logistic regression model by calling the helpers implemented above.

    Arguments:
    X_train -- training set, numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels, numpy array of shape (1, m_train)
    X_test -- test set, numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels, numpy array of shape (1, m_test)
    num_iterations -- number of iterations passed to optimize()
    learning_rate -- learning rate used in the update rule of optimize()
    print_cost -- Set to True to print the cost every 100 iterations

    Returns:
    d -- dictionary with costs, train/test predictions, learned w and b,
        and the hyperparameters used.
    """
    # Initialize parameters with zeros: one weight per input feature.
    w, b = initialize_with_zeros(X_train.shape[0])

    # Gradient descent on the training set.
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)

    # Retrieve learned parameters w and b from dictionary "parameters".
    w = parameters["w"]
    b = parameters["b"]

    # Predict test/train set examples with the learned parameters.
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)

    # Print train/test Errors (accuracy = 100 - mean absolute error * 100).
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train" : Y_prediction_train,
         "w" : w,
         "b" : b,
         "learning_rate" : learning_rate,
         "num_iterations": num_iterations}
    return d
# Train the model on the cat/non-cat data and keep the learned parameters in d.
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
# Example of a picture that was wrongly classified.
index = 1
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[d["Y_prediction_test"][0,index]].decode("utf-8") + "\" picture.")
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
# Compare learning curves for several learning rates on the same axes.
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "my_image.jpg" # change this to the name of your image file
## END CODE HERE ##
# We preprocess the image to fit your algorithm.
# NOTE(review): ndimage.imread and scipy.misc.imresize were removed from modern
# SciPy -- this cell requires an old SciPy version; confirm before reuse.
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2 - Overview of the Problem set
Step2: We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).
Step3: Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs.
Step4: Expected Output for m_train, m_test and num_px
Step5: Expected Output
Step7: <font color='blue'>
Step9: Expected Output
Step11: Expected Output
Step13: Expected Output
Step14: Expected Output
Step16: Expected Output
Step17: Run the following cell to train your model.
Step18: Expected Output
Step19: Let's also plot the cost function and the gradients.
Step20: Interpretation
Step21: Interpretation
|
7,121
|
<ASSISTANT_TASK:>
Python Code:
# NOTE(review): this section is Python 2 (print statements, dict.iteritems).
# Bare Coulomb-like potential V = 1/r; F = -dV/dr = 1/r**2 (sympy expression).
r = Symbol('r',positive=True)
V = 1/r
F = - diff(V, r)
F
# Electron and ion (nucleus) positions as rows of 3D coordinates.
epos = np.array([[0.0, 1.0, 0.0],
[0.2, 0.3, 0.0]])
npos = np.array([[0.0, 0.0, .0]])
def compute_bare_force(npos, epos, F, r):
# Sum, over all electrons, of F(|r_e - r_ion|) along the unit vector to
# each electron; returns one force vector per ion (same shape as npos).
forces = np.zeros_like(npos)
for ion_idx,ion_pos in enumerate(npos):
for elec in epos:
dr = elec - ion_pos
dr2 = np.dot(dr,dr)
dr_norm = np.sqrt(dr2)
#print dr_norm
dr_hat = dr/dr_norm
force_component = float(F.subs(r, dr_norm))
#print dr_norm, 1.0/dr_norm, force_component
forces[ion_idx] += force_component*dr_hat
#print ''
return forces
forces = compute_bare_force(npos, epos, F, r)
print forces
# Symbolic polynomial basis used to fit a smoothed force inside the cutoff R_c.
M = Symbol('M', integer=True)
k = Symbol('k', integer=True,positive=True)
a = IndexedBase('a',(M,))
Rc = Symbol('R_c', positive=True)
# Equation 4 in the paper
fbar = Sum(a[k]*r**k, (k,1,M))
fbar
# Integrate individual terms
integrate(r**k,(r,0,Rc)).doit()
# Still need to get to equations 6-8 from equations 2-4
m = Symbol('m')
c = IndexedBase('c',(M,))
j = Symbol('j',integer=True)
# S and h define the linear system S c = h for the fit coefficients.
Skj = Rc**(m+k+j+1)/(m+k+j+1)
hj = Rc**(j+1)/(j+1)
hj
grfits = OrderedDict()
Mval = 4
mval = 2
# Fill the numeric S matrix and h vector for M=4, m=2, R_c=0.4.
S = np.zeros((Mval,Mval))
h = np.zeros(Mval)
Rcval = 0.4
for kval in range(Mval):
for jval in range(Mval):
S[kval,jval] = Skj.subs({M:Mval,m:mval,k:kval+1,j:jval+1,Rc:Rcval})
for jval in range(Mval):
h[jval] = hj.subs({j:jval+1, Rc:Rcval})
print S
print h
# Solve for the polynomial coefficients c_k.
ck = np.linalg.solve(S,h)
ck
# Dump in C++ format for inclusion in the unit test
print '// for m_exp=%d, N_basis=%d, Rcut=%f'%(mval, Mval, Rcval)
print 'coeff[%d] = {'%len(ck),
print ','.join(['%g'%ck1 for ck1 in ck]),
#for c in ck:
# print '%g,'%c,
print '};'
np.dot(np.linalg.inv(S),h)
# Build the fitted g(r) polynomial and substitute the numeric coefficients.
gr = Sum(c[k]*r**(k+m),(k,1,M))
gr
gr2 = gr.subs({M:Mval, m:mval}).doit()
cc = c.subs(M,Mval)
print 'gr2 = ',gr2
for kval in range(Mval):
print kval, c[kval+1],ck[kval]
gr2 = gr2.subs(cc[kval+1],ck[kval])
print kval,gr2
gr2
grfits[Mval] = gr2
def compute_smoothed_force(npos, epos, F, r):
# Same loop as compute_bare_force, but inside the cutoff Rcval the bare
# 1/r**2 force is replaced by the fitted polynomial gr2(r)/r**2.
forces = np.zeros_like(npos)
for ion_idx,ion_pos in enumerate(npos):
for elec in epos:
dr = elec - ion_pos
dr2 = np.dot(dr,dr)
dr_norm = np.sqrt(dr2)
#print dr_norm
dr_hat = dr/dr_norm
#print 'dr_norm',dr_norm
if dr_norm < Rcval:
force_component = float(gr2.subs(r, dr_norm)/dr2)
else:
force_component = float(F.subs(r, dr_norm))
#print dr_norm, 1.0/dr_norm, force_component
forces[ion_idx] += force_component*dr_hat
#print ''
return forces
print 'bare =',compute_bare_force(npos, epos, F, r)
forces = compute_smoothed_force(npos, epos, F, r)
print 'smoothed =',forces
# Sample the fitted polynomial(s) on a grid inside [0, Rcval] for plotting.
xss = OrderedDict()
yss = OrderedDict()
xs = []
ys = []
step = Rcval/50
for i in range(50):
rval = i*step + step
xs.append(rval)
ys.append(exp(-rval**2))
#xss[0] = xs
#yss[0] = ys
for Mval,gr2 in grfits.iteritems():
xs = []
ys = []
for i in range(50):
rval = i*step + step
#print rval, gr2.subs(r,rval)
xs.append(rval)
#ys.append(exp(-rval**2)*gr2.subs(r,rval))
ys.append(gr2.subs(r,rval))
xss[Mval] = xs
yss[Mval] = ys
for Mval in xss.keys()[0:1]:
plt.plot(xss[Mval],yss[Mval])
plt.show()
# Numerical quadrature of the fitted polynomial over [0, Rcval] via mpmath.
fx = lambda rval: exp(-rval**2)
print 0,mpmath.quad(fx, [0,Rcval])
for Mval, gr2 in grfits.iteritems():
#fx = lambda rval : exp(-rval**2)*gr2.subs(r, rval)
fx = lambda rval : gr2.subs(r,rval)
ival = mpmath.quad(fx, [0,Rcval])
#ival = integrate(exp(-r**2)*gr2/r**2,(r,0,Rcval))
print Mval,ival
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: One solution is to smooth the contribution inside some cutoff radius $R_c$.
Step2: The goal is to fit a constant function with a polynomial that is missing the constant term (only powers of $r$ with $k\gt 0$)
Step3: Matrix and vector that define the coefficients, equation 8 in the paper
|
7,122
|
<ASSISTANT_TASK:>
Python Code:
# Photon selection and sample id for this analysis run.
ph_sel_name = "DexDem"
data_id = "12d"
# ph_sel_name = "all-ph"
# data_id = "7d"
from fretbursts import *
init_notebook()
from IPython.display import display
data_dir = './data/singlespot/'
import os
data_dir = os.path.abspath(data_dir) + '/'
assert os.path.exists(data_dir), "Path '%s' does not exist." % data_dir
from glob import glob
# All measurement HDF5 files, excluding background-only ("_BKG") files.
file_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f)
## Selection for POLIMI 2012-11-26 dataset
labels = ['17d', '27d', '7d', '12d', '22d']
files_dict = {lab: fname for lab, fname in zip(labels, file_list)}
files_dict
ph_sel_map = {'all-ph': Ph_sel('all'), 'Dex': Ph_sel(Dex='DAem'),
'DexDem': Ph_sel(Dex='Dem')}
ph_sel = ph_sel_map[ph_sel_name]
data_id, ph_sel_name
# Load the photon-HDF5 measurement selected above.
d = loader.photon_hdf5(filename=files_dict[data_id])
d.ph_times_t, d.det_t
# Define laser-alternation parameters, check them, then apply the selection.
d.add(det_donor_accept=(0, 1), alex_period=4000, D_ON=(2850, 580), A_ON=(900, 2580), offset=0)
plot_alternation_hist(d)
loader.alex_apply_period(d)
d
d.time_max
# Background estimation (exponential-fit, 60 s windows).
d.calc_bg(bg.exp_fit, time_s=60, tail_min_us='auto', F_bg=1.7)
dplot(d, timetrace_bg)
d.rate_m, d.rate_dd, d.rate_ad, d.rate_aa
# Burst search with the configured photon selection, then size selection.
bs_kws = dict(L=10, m=10, F=7, ph_sel=ph_sel)
d.burst_search(**bs_kws)
th1 = 30
ds = d.select_bursts(select_bursts.size, th1=30)
bursts = (bext.burst_data(ds, include_bg=True, include_ph_index=True)
.round({'E': 6, 'S': 6, 'bg_d': 3, 'bg_a': 3, 'bg_aa': 3, 'nd': 3, 'na': 3, 'naa': 3, 'nda': 3, 'nt': 3, 'width_ms': 4}))
bursts.head()
# Export per-burst data to CSV, parameterized by the search settings.
burst_fname = ('results/bursts_usALEX_{sample}_{ph_sel}_F{F:.1f}_m{m}_size{th}.csv'
.format(sample=data_id, th=th1, **bs_kws))
burst_fname
bursts.to_csv(burst_fname)
# Sanity: no direct-excitation / leakage corrections applied at this stage.
assert d.dir_ex == 0
assert d.leakage == 0
print(d.ph_sel)
dplot(d, hist_fret);
# if data_id in ['7d', '27d']:
#     ds = d.select_bursts(select_bursts.size, th1=20)
# else:
#     ds = d.select_bursts(select_bursts.size, th1=30)
ds = d.select_bursts(select_bursts.size, add_naa=False, th1=30)
n_bursts_all = ds.num_bursts[0]
def select_and_plot_ES(fret_sel, do_sel):
# Split bursts into FRET and donor-only populations via E/S selections,
# drawing both selection regions on the shared `ax` (module-level) plot.
ds_fret= ds.select_bursts(select_bursts.ES, **fret_sel)
ds_do = ds.select_bursts(select_bursts.ES, **do_sel)
bpl.plot_ES_selection(ax, **fret_sel)
bpl.plot_ES_selection(ax, **do_sel)
return ds_fret, ds_do
ax = dplot(ds, hist2d_alex, S_max_norm=2, scatter_alpha=0.1)
# Per-sample E/S selection windows (tuned by sample id).
if data_id == '7d':
fret_sel = dict(E1=0.60, E2=1.2, S1=0.2, S2=0.9, rect=False)
do_sel = dict(E1=-0.2, E2=0.5, S1=0.8, S2=2, rect=True)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '12d':
fret_sel = dict(E1=0.30,E2=1.2,S1=0.131,S2=0.9, rect=False)
do_sel = dict(E1=-0.4, E2=0.4, S1=0.8, S2=2, rect=False)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '17d':
fret_sel = dict(E1=0.01, E2=0.98, S1=0.14, S2=0.88, rect=False)
do_sel = dict(E1=-0.4, E2=0.4, S1=0.80, S2=2, rect=False)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '22d':
fret_sel = dict(E1=-0.16, E2=0.6, S1=0.2, S2=0.80, rect=False)
do_sel = dict(E1=-0.2, E2=0.4, S1=0.85, S2=2, rect=True)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '27d':
fret_sel = dict(E1=-0.1, E2=0.5, S1=0.2, S2=0.82, rect=False)
do_sel = dict(E1=-0.2, E2=0.4, S1=0.88, S2=2, rect=True)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
# Population counts and donor-only fraction.
n_bursts_do = ds_do.num_bursts[0]
n_bursts_fret = ds_fret.num_bursts[0]
n_bursts_do, n_bursts_fret
d_only_frac = 1.*n_bursts_do/(n_bursts_do + n_bursts_fret)
print ('D-only fraction:', d_only_frac)
dplot(ds_fret, hist2d_alex, scatter_alpha=0.1);
dplot(ds_do, hist2d_alex, S_max_norm=2, scatter=False);
def hsm_mode(s):
    """Half-sample mode (HSM) estimator of `s`.

    `s` is a sample from a continuous distribution with a single peak.

    NOTE(review): assumes len(s) >= 4; for 3 or fewer points the final
    indexing can run past the end of the sorted array -- confirm callers.

    Reference:
        Bickel, Fruehwirth (2005). arXiv:math/0505419
    """
    # Work on a sorted, zero-copy view of the sample.
    s = memoryview(np.sort(s))
    i1 = 0
    i2 = len(s)
    # Repeatedly keep the half-sample with the smallest spread (densest half).
    while i2 - i1 > 3:
        n = (i2 - i1) // 2
        w = [s[n - 1 + i + i1] - s[i + i1] for i in range(n)]
        i1 = w.index(min(w)) + i1
        i2 = i1 + n
    # Down to 3 points: drop the endpoint farther from the middle value.
    if i2 - i1 == 3:
        if s[i1 + 1] - s[i1] < s[i2] - s[i1 + 1]:
            i2 -= 1
        elif s[i1 + 1] - s[i1] > s[i2] - s[i1 + 1]:
            i1 += 1
        else:
            i1 = i2 = i1 + 1
    # Mode estimate: midpoint of the two remaining values.
    return 0.5 * (s[i1] + s[i2])
# Donor-only E peak via the half-sample-mode estimator.
E_pr_do_hsm = hsm_mode(ds_do.E[0])
print ("%s: E_peak(HSM) = %.2f%%" % (ds.ph_sel, E_pr_do_hsm*100))
# Donor-only E peak via a Gaussian histogram fit.
E_fitter = bext.bursts_fitter(ds_do, weights=None)
E_fitter.histogram(bins=np.arange(-0.2, 1, 0.03))
E_fitter.fit_histogram(model=mfit.factory_gaussian())
E_fitter.params
res = E_fitter.fit_res[0]
res.params.pretty_print()
E_pr_do_gauss = res.best_values['center']
E_pr_do_gauss
# Donor-only E peak via a KDE maximum restricted to E_range_do.
bandwidth = 0.03
E_range_do = (-0.1, 0.15)
E_ax = np.r_[-0.2:0.401:0.0002]
E_fitter.calc_kde(bandwidth=bandwidth)
E_fitter.find_kde_max(E_ax, xmin=E_range_do[0], xmax=E_range_do[1])
E_pr_do_kde = E_fitter.kde_max_pos[0]
E_pr_do_kde
# Compare the three donor-leakage estimators on one plot.
mfit.plot_mfit(ds_do.E_fitter, plot_kde=True, plot_model=False)
plt.axvline(E_pr_do_hsm, color='m', label='HSM')
plt.axvline(E_pr_do_gauss, color='k', label='Gauss')
plt.axvline(E_pr_do_kde, color='r', label='KDE')
plt.xlim(0, 0.3)
plt.legend()
print('Gauss: %.2f%%\n  KDE: %.2f%%\n  HSM: %.2f%%' %
(E_pr_do_gauss*100, E_pr_do_kde*100, E_pr_do_hsm*100))
# Burst-size distribution and mean burst size above the nt_th1 threshold.
nt_th1 = 50
dplot(ds_fret, hist_size, which='all', add_naa=False)
xlim(-0, 250)
plt.axvline(nt_th1)
Th_nt = np.arange(35, 120)
nt_th = np.zeros(Th_nt.size)
for i, th in enumerate(Th_nt):
ds_nt = ds_fret.select_bursts(select_bursts.size, th1=th)
nt_th[i] = (ds_nt.nd[0] + ds_nt.na[0]).mean() - th
plt.figure()
plot(Th_nt, nt_th)
plt.axvline(nt_th1)
nt_mean = nt_th[np.where(Th_nt == nt_th1)][0]
nt_mean
# FRET population: E fit (size-weighted KDE peak + Gaussian histogram fit).
E_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, bandwidth=bandwidth, weights='size')
E_fitter = ds_fret.E_fitter
E_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])
E_fitter.fit_histogram(mfit.factory_gaussian(center=0.5))
E_fitter.fit_res[0].params.pretty_print()
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(E_fitter, ax=ax[0])
mfit.plot_mfit(E_fitter, plot_model=False, plot_kde=True, ax=ax[1])
print('%s\nKDE peak %.2f ' % (ds_fret.ph_sel, E_pr_fret_kde*100))
display(E_fitter.params*100)
# Alternative E fits: weighted mean and Gaussian fits with/without weights.
ds_fret.fit_E_m(weights='size')
ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.03], weights=None)
ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.005], weights='size')
E_kde_w = E_fitter.kde_max_pos[0]
E_gauss_w = E_fitter.params.loc[0, 'center']
E_gauss_w_sig = E_fitter.params.loc[0, 'sigma']
E_gauss_w_err = float(E_gauss_w_sig/np.sqrt(ds_fret.num_bursts[0]))
E_gauss_w_fiterr = E_fitter.fit_res[0].params['center'].stderr
E_kde_w, E_gauss_w, E_gauss_w_sig, E_gauss_w_err, E_gauss_w_fiterr
# Stoichiometry (S) fit: KDE peak + Gaussian histogram fit.
S_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, burst_data='S', bandwidth=0.03)  #weights='size', add_naa=True)
S_fitter = ds_fret.S_fitter
S_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])
S_fitter.fit_histogram(mfit.factory_gaussian(), center=0.5)
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(S_fitter, ax=ax[0])
mfit.plot_mfit(S_fitter, plot_model=False, plot_kde=True, ax=ax[1])
print('%s\nKDE peak %.2f ' % (ds_fret.ph_sel, S_pr_fret_kde*100))
display(S_fitter.params*100)
S_kde = S_fitter.kde_max_pos[0]
S_gauss = S_fitter.params.loc[0, 'center']
S_gauss_sig = S_fitter.params.loc[0, 'sigma']
S_gauss_err = float(S_gauss_sig/np.sqrt(ds_fret.num_bursts[0]))
S_gauss_fiterr = S_fitter.fit_res[0].params['center'].stderr
S_kde, S_gauss, S_gauss_sig, S_gauss_err, S_gauss_fiterr
# Maximum-likelihood Gaussian fit of S (mean, std) and its size-weighted variant.
S = ds_fret.S[0]
S_ml_fit = (S.mean(), S.std())
S_ml_fit
weights = bl.fret_fit.get_weights(ds_fret.nd[0], ds_fret.na[0], weights='size', naa=ds_fret.naa[0], gamma=1.)
S_mean = np.dot(weights, S)/weights.sum()
S_std_dev = np.sqrt(
np.dot(weights, (S - S_mean)**2)/weights.sum())
S_wmean_fit = [S_mean, S_std_dev]
S_wmean_fit
# Save all fitted quantities as one CSV row; `variables` fixes the column order.
sample = data_id
variables = ('sample n_bursts_all n_bursts_do n_bursts_fret '
'E_kde_w E_gauss_w E_gauss_w_sig E_gauss_w_err E_gauss_w_fiterr '
'S_kde S_gauss S_gauss_sig S_gauss_err S_gauss_fiterr '
'E_pr_do_kde E_pr_do_hsm E_pr_do_gauss nt_mean\n')
variables_csv = variables.replace(' ', ',')
# Per-variable format strings: ints for burst counts, %s for sample, floats otherwise.
fmt_float = '{%s:.6f}'
fmt_int = '{%s:d}'
fmt_str = '{%s}'
fmt_dict = {**{'sample': fmt_str},
**{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}}
var_dict = {name: eval(name) for name in variables.split()}
var_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\n'
data_str = var_fmt.format(**var_dict)
print(variables_csv)
print(data_str)
# NOTE: The file name should be the notebook name but with .csv extension
# Append mode; the header row is written only when the file is empty.
with open('results/usALEX-5samples-PR-raw-%s.csv' % ph_sel_name, 'a') as f:
f.seek(0, 2)
if f.tell() == 0:
f.write(variables_csv)
f.write(data_str)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load software and filenames definitions
Step2: Data folder
Step3: List of data files
Step4: Data load
Step5: Laser alternation selection
Step6: We need to define some parameters
Step7: We should check if everything is OK with an alternation histogram
Step8: If the plot looks good we can apply the parameters with
Step9: Measurements infos
Step10: Or check the measurements duration
Step11: Compute background
Step12: Burst search and selection
Step14: Donor Leakage fit
Step15: Gaussian Fit
Step16: KDE maximum
Step17: Leakage summary
Step18: Burst size distribution
Step19: Fret fit
Step20: Weighted mean of $E$ of each burst
Step21: Gaussian fit (no weights)
Step22: Gaussian fit (using burst size as weights)
Step23: Stoichiometry fit
Step24: The Maximum likelihood fit for a Gaussian population is the mean
Step25: Computing the weighted mean and weighted standard deviation we get
Step26: Save data to file
Step27: The following string contains the list of variables to be saved. When saving, the order of the variables is preserved.
Step28: This is just a trick to format the different variables
|
7,123
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import csv
import io
import urllib.request
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
# Stream the DoseNet etch_roof CSV straight from the RadWatch server.
url = 'http://radwatch.berkeley.edu/sites/default/files/dosenet/etch_roof.csv'
response = urllib.request.urlopen(url)
reader = csv.reader(io.TextIOWrapper(response))
timedata = []
counts = []
CPMerror = []
# `line` counts rows so the header (row 0) is skipped.
line = 0
for row in reader:
if line != 0:
timedata.append(datetime.fromtimestamp(float(row[2],)))
# 3rd column of the CSV is a UNIX timestamp that can be converted
# to datetime via fromtimestamp
counts.append(float(row[6]))
CPMerror.append(float(row[7]))
line += 1
def month_bin(timedata, cpm, error):
    """Bin CPM readings by calendar month and plot the monthly averages.

    Parameters
    ----------
    timedata : list of datetime -- timestamp of each reading
    cpm      : list of float    -- counts-per-minute readings
    error    : list of float    -- reported error of each reading

    Iterates from the oldest reading (end of the lists) to the newest,
    keeping a running sum per month so the mean is just sum / count.
    """
    # Initialize the first bin from the oldest reading.
    Year = [timedata[-1].year]
    Month = [timedata[-1].month]
    sumCPM = [0]       # running CPM sum for each month (mean = sum / count)
    sumError = [0]
    DataCount = [0]    # number of readings accumulated in each month
    bin_num = 0        # index of the currently open bin
    # BUG FIX: the loop was `range(len(counts)-1, 0, -1)`, which (a) read the
    # module-level globals `counts`/`CPMerror` instead of the `cpm`/`error`
    # parameters, and (b) stopped at index 1, silently dropping the newest
    # reading at index 0.
    for i in range(len(timedata) - 1, -1, -1):
        if Year[bin_num] != timedata[i].year or Month[bin_num] != timedata[i].month:
            # Month (or year) rolled over: open a fresh empty bin.
            Year.append(timedata[i].year)
            Month.append(timedata[i].month)
            sumCPM.append(0)
            sumError.append(0)
            DataCount.append(0)
            bin_num += 1
        # BUG FIX: the reading that triggered a rollover was previously never
        # added to its new bin (losing each month's first point and allowing
        # a divide-by-zero for single-reading months).
        sumCPM[bin_num] += cpm[i]
        sumError[bin_num] += error[i]
        DataCount[bin_num] += 1
    # np.array allows element-by-element division for the monthly means.
    binnedCPM = np.array(sumCPM) / np.array(DataCount)
    avgError = np.array(sumError) / np.array(DataCount)
    # Standard error of the mean: average error divided by sqrt(N).
    stdError = avgError / np.sqrt(DataCount)
    # Convert (month, year) pairs back into datetimes for the x axis.
    strDates = [str(m) + '-' + str(n) for m, n in zip(Month, Year)]
    binnedDates = [datetime.strptime(s, '%m-%Y') for s in strDates]
    fig, ax = plt.subplots()
    ax.plot(binnedDates, binnedCPM, 'ro-')
    ax.errorbar(binnedDates, binnedCPM, yerr=stdError, fmt='ro', ecolor='r')
    plt.xticks(rotation=30)
    plt.title('DoseNet: Time-Averaged CPM (Etcheverry Roof)')
    plt.xlabel('Date')
    plt.ylabel('Average CPM')
month_bin(timedata, counts, CPMerror)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For this example, I will bin DoseNet data from our device on the Etcheverry Roof and average the data. Afterwards, I will plot the data to show the consequences of data binning. While I perform this technique with my own code, it is important to note that there are many built-in functions within Python and plenty of examples of data binning you can find online.
|
7,124
|
<ASSISTANT_TASK:>
Python Code:
# Sample three test functions on a uniform grid with spacing dx.
dx = 0.3
x = np.arange(0, 10, dx) # returns [0, dx, 2dx, 3dx, 4dx, 5dx, ...]
print(x)
f1 = np.sin(x)
f2 = x**2/100
f3 = np.log(1+x)-1
fs = [f1, f2, f3]
for i in range(3): plt.plot(x, fs[i])
# Exact analytical derivatives of f1..f3, used below for comparison.
df1 = np.cos(x)
df2 = x/50
df3 = 1/(1+x)
dfs = [df1, df2, df3]
def derivative(f, dx):
    """Forward-difference derivative of sampled values *f* on a uniform
    grid with spacing *dx*.  The result is one element shorter than *f*."""
    diffs = f[1:] - f[:-1]
    return diffs / dx
# Numerical (forward-difference) derivatives, plotted over the exact
# analytical derivatives (dashed black) for comparison.
ndfs = [derivative(f, dx) for f in fs]
for i in range(3): plt.plot(x[:-1], ndfs[i], lw=3, alpha=0.5)
for i in range(3): plt.plot(x, dfs[i], '--k')
plt.show()
def central_derivative(f, dx):
    """Second-order central-difference derivative of sampled values *f*
    with grid spacing *dx*.  The result is two elements shorter than *f*."""
    step = 2 * dx
    return (f[2:] - f[:-2]) / step
# Central differences: note the interior grid x[1:-1] and the visibly
# smaller error (O(dx^2) vs O(dx) for the one-sided formula).
ndfs = [central_derivative(f, dx) for f in fs]
for i in range(3): plt.plot(x[1:-1], ndfs[i], lw=3, alpha=0.5)
for i in range(3): plt.plot(x, dfs[i], '--k')
plt.show()
def analytical_solution(t, k, d, x0, v0):
    """Closed-form x(t) for the damped oscillator x'' = -k x - d x' with
    initial position x0 and velocity v0.

    Casting the damping to complex lets one cosh/sinh expression cover both
    the under- and over-damped regimes; np.real drops the vanishing
    imaginary part.  NOTE(review): d**2 == 4k (critical damping) makes the
    final divisor zero -- avoid that parameter combination.
    """
    d += 0j # Exploiting complex numbers for one general solution
    return np.real((np.exp(-((d*t)/2))*(np.sqrt(d**2-4*k)*x0*np.cosh(1/2*np.sqrt(d**2-4*k)*t)+(2*v0+d* \
                x0)*np.sinh(1/2*np.sqrt(d**2-4*k)*t)))/np.sqrt(d**2-4*k))
# Semi-implicit Euler integration of the damped oscillator, compared
# against the analytical solution (dashed black).
dt = 0.02
t = np.arange(0,25,dt)
k = 5; d = 0.3; x0 = 0; v0 = 1
x = np.zeros_like(t)
v = np.zeros_like(t)
x[0] = x0; v[0] = v0
for i in range(len(t)-1): # Step through time with step size dt
    v[i + 1] = v[i] + (-k * x[i] - d * v[i]) * dt
    x[i + 1] = x[i] + v[i+1] * dt # v[i+1] makes a big stability difference!
plt.plot(t, x, lw=3, alpha=0.5)
plt.plot(t, analytical_solution(t, k, d, x0, v0), '--k')
plt.show()
# 1-D boundary-value problem y'' = y, y(0)=y0, y(1)=y1, solved by
# discretizing the second derivative as a tridiagonal matrix L.
dx = 0.01
x = np.arange(0, 1, dx)
n = len(x)
y0 = 1
y1 = 10
y = x**4 # not correct
# Discrete 1-D Laplacian: 1 on the off-diagonals, -2 on the diagonal.
L = np.diag(np.ones(n - 1), 1) + np.diag(np.ones(n - 1), -1) - 2 * np.diag(np.ones(n))
print(L)
L = L/dx**2
ypp = np.dot(L, y) # matrix product L y
plt.plot(x, ypp, lw=3, alpha=0.5)
plt.plot(x, 4*3*x**2, '--k')
plt.axis([0, 1, -3, 15])
plt.show()
# Assemble the full system A y = b (A = L - I encodes y'' - y = 0).
I = np.eye(n)
A = L - I
# Overwrite the first and last rows with identity rows so they simply
# pin the endpoint values to the boundary conditions in b.
A[0, :] = 0
A[0, 0] = 1
A[-1, :] = 0
A[-1, -1] = 1
b = np.zeros(n)
b[0] = y0
b[-1] = y1
np.set_printoptions(suppress=True)
print(np.round(A))
print('* y == ')
print(b)
from numpy.linalg import solve
y = solve(A, b)
# Exact solution of y'' = y with the same boundary values, for comparison.
analytical = (np.exp(-x)*(np.exp(1)*(np.exp(1)*y0-y1)+np.exp(2*x)*(-y0+np.exp(1)*y1)))/(-1+np.exp(2))
plt.plot(x, y, lw=3, alpha=0.5)
plt.plot(x, analytical, '--k')
plt.show()
# Plot a closed-form reference solution on [-pi, pi].
dx = 0.01
x = np.arange(-np.pi, np.pi, dx)
y = 1/4*np.exp(-np.pi)/np.sinh(np.pi)*(-2+np.exp(x)*(1+2*np.exp(np.pi)+np.cos(x)+ \
    np.sin(x)-np.exp(2*(np.pi))*(1+np.cos(x)+np.sin(x))))
plt.plot(x, y, '--k')
plt.axis([-np.pi, np.pi, -6, 1.5])
plt.show()
# 2-D demo: evaluate exp(x)*sin(y) on a meshgrid and show it as an image.
dx = 0.01
x = np.arange(0, 7, dx)
y = np.arange(-3, 3, dx)
X, Y = np.meshgrid(x, y)
U = np.exp(X) * np.sin(Y)
plt.imshow(U, extent=(min(x), max(x), max(y), min(y)))
plt.xlabel('x')
plt.ylabel('y')
plt.colorbar()
plt.show()
print(X.shape, Y.shape, U.shape)
print(X)
# Set up the unit-square grid used by the 2-D Laplace solver below;
# `shape` is the grid shape that to_matrix() reshapes back to.
dx = 0.02
x = np.arange(0, 1 + dx, dx)
m = len(x)
print(x[0], x[-1])
X, Y = np.meshgrid(x, x)
shape = X.shape
def to_vector(mat):
    """Flatten a 2-D grid array into a 1-D vector (row-major order)."""
    return np.reshape(mat, -1)
def to_matrix(vec):
    """Reshape a flat solution vector back onto the 2-D grid.

    Relies on the module-level `shape` set from the meshgrid above.
    """
    return np.reshape(vec, shape)
print(X.shape, '=>', to_vector(X).shape)
print((X == to_matrix(to_vector(X))).all())
print((Y == to_matrix(to_vector(Y))).all())
# Flatten the coordinate grids so the 2-D Laplace problem becomes a
# linear system over one long vector of grid points.
x = to_vector(X)
y = to_vector(Y)
n = len(x)
# Build the 2-D five-point Laplacian by searching for each neighbour's
# index (slow but transparent); the vectorized L_quick below is equivalent.
L = np.zeros((n, n))
for i in range(n):
    L[i,i] = -4
    j = np.argmin( (x[i] + dx - x)**2 + (y[i] - y)**2 ) # Find index j in vectors for point (x[i]+dx, y[i])
    if i!=j: L[i,j] = 1 # If i==j, we are at the boundary of the domain
    j = np.argmin( (x[i] - dx - x)**2 + (y[i] - y)**2 ) # Find index j in vectors for point (x[i]-dx, y[i])
    if i!=j: L[i,j] = 1
    j = np.argmin( (x[i] - x)**2 + (y[i] + dx - y)**2 ) # Find index j in vectors for point (x[i], y[i]+dx)
    if i!=j: L[i,j] = 1
    j = np.argmin( (x[i] - x)**2 + (y[i] - dx - y)**2 ) # Find index j in vectors for point (x[i], y[i]-dx)
    if i!=j: L[i,j] = 1
print(L)
L = L/dx**2
# Direct band construction of the same matrix; the zeroed entries in `a`
# break the sub/super-diagonal at each grid-row boundary.
L_quick = -4 * np.eye(n) + np.diag(np.ones(n-m), m) + np.diag(np.ones(n-m), -m)
a = np.ones(n-1); a[(m-1)::m] = 0
L_quick += np.diag(a,1) + np.diag(a,-1)
L_quick = L_quick/dx**2
print( (L == L_quick).all() )
# Impose Dirichlet boundary conditions by replacing boundary rows with
# identity rows and putting the boundary values in b.
b = np.zeros(n)
for i in range(n):
    if (x[i]==0 or x[i]==1 or y[i]==0 or y[i]==1): # For any boundary point
        L[i, :] = 0
        L[i, i] = 1
        # BC points that are not equal to zero:
        if x[i] == 0:
            b[i] = 1 - y[i]
        elif y[i] == 0:
            b[i] = 1
from scipy.linalg import solve
u = solve(L, b)
U = to_matrix(u)
plt.imshow(U, extent=(min(x), max(x), max(y), min(y)))
plt.xlabel('x')
plt.ylabel('y')
plt.colorbar()
# NOTE(review): 'distriubtion' is a typo in the displayed title; left
# unchanged here since it is runtime text.
plt.title('Temperature distriubtion of plate')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let us see if we can calculate these derivatives numerically.
Step2: That worked pretty well, but we can do even better by using central differences for estimating the derivative.
Step3: The error is order dx for left/right derivatives, but only dx^2 for central derivatives.
Step4: Exercise
Step5: Imagine that we had calculated y. It would be some array
Step6: How would we calculate the vector y''? In particular, can we find a matrix L such that y'' = L y?
Step7: All looks good, expect the endpoints. This was expected. This is why we have boundary conditions.
Step8: Where A is defining the full problem.
Step9: The first row extracts y[0] and the equation then sets it equal to y0(=1) and the last row extracts y[-1] and sets it equal to y1(=10).
Step10: It is great for testing one's code that we can compare to analytical solutions, but of course the strength of numerical methods is that we can solve equations that we do not have analytical solutions for.
Step11: Extra exercise
Step12: In the above np.meshgrid has made the 2D arrays X,Y
Step13: Example problem
Step14: We wish to solve for U, but currently U, like X and Y, would be a matrix. Linear systems tend to work on vectors, so we should formulate our system such that we can solve for a vector.
Step15: So we define 1D arrays of our coordinates
Step16: The discrete Laplacian looks like this
Step17: As mentioned, this can be done much more efficiently (and without using loops).
Step18: Now we implement the boundary conditions
Step19: And lastly, we solve
|
7,125
|
<ASSISTANT_TASK:>
Python Code:
class Item(object):
    """A generic lab item with a name, description and current location."""

    def __init__(self, name, description, location):
        self.name = name                # human-readable item name
        self.description = description  # free-text description
        self.location = location        # where the item currently is

    def update_location(self, new_location):
        """Record that the item has been moved to *new_location*."""
        # BUG FIX: this was a `pass` stub, so the location never changed.
        self.location = new_location
class Equipment(Item):
    # Durable (non-consumable) item; currently identical to Item but kept
    # as a distinct type so equipment-specific behaviour can be added later.
    pass
class Consumable(Item):
    """An Item that is used up over time (reagents, media, etc.)."""

    def __init__(self, name, description, location, initial_quantity, current_quantity, storage_temp, flammability):
        # Reuse Item's initialisation for the shared fields.
        super().__init__(name, description, location)
        self.initial_quantity = initial_quantity  # amount when first stocked
        self.current_quantity = current_quantity  # amount remaining now
        # BUG FIX: storage_temp was accepted but never stored on the instance.
        self.storage_temp = storage_temp
        self.flammability = flammability

    def update_quantity_remaining(self, amount):
        """Record the quantity currently remaining.

        NOTE(review): interpreted as "set remaining to *amount*", not as a
        decrement -- confirm the intended semantics with callers.
        """
        # BUG FIX: this was a `pass` stub, so the quantity never changed.
        self.current_quantity = amount
class Ingredient(object):
    """The ingredient object that contains nutritional information."""
    # BUG FIX: the docstrings in this class had lost their quotes (bare
    # text lines), which made the original block a SyntaxError.

    def __init__(self, name, carbs, protein, fat):
        self.name = name        # ingredient name
        self.carbs = carbs      # carbohydrate content
        self.protein = protein  # protein content
        self.fat = fat          # fat content

    def get_nutrition(self):
        """Return the nutritional information as (carbs, protein, fat)."""
        return (self.carbs, self.protein, self.fat)
class Recipe(object):
    """The Recipe object containing the ingredients.

    ``ingredients`` is a list of (amount, Ingredient) pairs.
    """
    # BUG FIX: the docstrings in this class had lost their quotes (bare
    # text lines), which made the original block a SyntaxError.

    def __init__(self, name, ingredients):
        self.name = name
        self.ingredients = ingredients  # list of (amount, Ingredient) tuples

    def get_nutrition(self):
        """Return total [carbs, protein, fat], each ingredient scaled by
        its amount."""
        nutrition = [0, 0, 0]
        for amount, ingredient in self.ingredients:
            nutrition[0] += amount * ingredient.carbs
            nutrition[1] += amount * ingredient.protein
            nutrition[2] += amount * ingredient.fat
        return nutrition
# Build a bread recipe from (grams, Ingredient) pairs and print its totals.
bread = Recipe('Bread', [(820, Ingredient('Flour', 0.77, 0.10, 0.01)),
                         (30, Ingredient('Oil', 0, 0, 1)),
                         (36, Ingredient('Sugar', 1, 0, 0)),
                         (7, Ingredient('Yeast', 0.3125, 0.5, 0.0625)),
                         (560, Ingredient('Water', 0, 0, 0))])
print(bread.ingredients)
print(bread.get_nutrition())
# HTTP demo: fetch recent events for a GitHub repository via its REST API.
import requests
r = requests.get('https://api.github.com/repos/streety/biof509/events')
print(r.status_code)
print(r.headers['content-type'])
print(r.text[:1000])
print(r.json()[0]['payload']['commits'][0]['message'])
type(r)
# pandas demo: build a small labelled DataFrame, inspect it, plot it,
# then round-trip it through CSV.
import pandas as pd
data = pd.DataFrame([[0,1,2,3], [4,5,6,7], [8,9,10,11]], index=['a', 'b', 'c'], columns=['col1', 'col2', 'col3', 'col4'])
data
print(data.shape)
print(data['col1'])
print(data.col1)
import matplotlib.pyplot as plt
%matplotlib inline
data.plot()
data.to_csv('Wk05-temp.csv')
data2 = pd.read_csv('Wk05-temp.csv', index_col=0)
print(data2)
class Ingredient(object):
    """The ingredient object that contains nutritional information."""
    # BUG FIX: the docstrings in this class had lost their quotes (bare
    # text lines), which made the original block a SyntaxError.

    def __init__(self, name, carbs, protein, fat):
        self.name = name
        self.carbs = carbs
        self.protein = protein
        self.fat = fat

    def __repr__(self):
        return 'Ingredient({0}, {1}, {2}, {3})'.format(self.name, self.carbs, self.protein, self.fat)

    def get_nutrition(self):
        """Return the nutritional information as (carbs, protein, fat)."""
        return (self.carbs, self.protein, self.fat)
class Recipe(object):
    """The Recipe object containing the ingredients.

    ``ingredients`` is a list of (amount, Ingredient) pairs.
    """
    # BUG FIX: the docstrings in this class had lost their quotes (bare
    # text lines), which made the original block a SyntaxError.

    def __init__(self, name, ingredients):
        self.name = name
        self.ingredients = ingredients

    def get_nutrition(self):
        """Return total [carbs, protein, fat], each ingredient scaled by
        its amount."""
        nutrition = [0, 0, 0]
        for amount, ingredient in self.ingredients:
            nutrition[0] += amount * ingredient.carbs
            nutrition[1] += amount * ingredient.protein
            nutrition[2] += amount * ingredient.fat
        return nutrition
# Rebuild the bread recipe against the updated classes and print its totals.
bread = Recipe('Bread', [(820, Ingredient('Flour', 0.77, 0.10, 0.01)),
                         (30, Ingredient('Oil', 0, 0, 1)),
                         (36, Ingredient('Sugar', 1, 0, 0)),
                         (7, Ingredient('Yeast', 0.3125, 0.5, 0.0625)),
                         (560, Ingredient('Water', 0, 0, 0))])
print(bread.ingredients)
print(bread.get_nutrition())
class Ingredient(object):
    """The ingredient object that contains nutritional information."""
    # BUG FIX: the docstrings in this class had lost their quotes (bare
    # text lines), which made the original block a SyntaxError.

    def __init__(self, name, carbs, protein, fat):
        self.name = name
        self.carbs = carbs
        self.protein = protein
        self.fat = fat

    def __repr__(self):
        return 'Ingredient({0}, {1}, {2}, {3})'.format(self.name, self.carbs, self.protein, self.fat)

    def get_nutrition(self):
        """Return the nutritional information as (carbs, protein, fat)."""
        return (self.carbs, self.protein, self.fat)
class Recipe(object):
    """The Recipe object containing the ingredients.

    ``ingredients`` is a list of (amount, Ingredient) pairs.
    """
    # BUG FIX: the docstrings in this class had lost their quotes (bare
    # text lines), which made the original block a SyntaxError.

    def __init__(self, name, ingredients):
        self.name = name
        self.ingredients = ingredients

    def get_nutrition(self):
        """Return total [carbs, protein, fat], each ingredient scaled by
        its amount."""
        nutrition = [0, 0, 0]
        for amount, ingredient in self.ingredients:
            nutrition[0] += amount * ingredient.carbs
            nutrition[1] += amount * ingredient.protein
            nutrition[2] += amount * ingredient.fat
        return nutrition
# Rebuild the bread recipe once more and show the WSGI example file.
bread = Recipe('Bread', [(820, Ingredient('Flour', 0.77, 0.10, 0.01)),
                         (30, Ingredient('Oil', 0, 0, 1)),
                         (36, Ingredient('Sugar', 1, 0, 0)),
                         (7, Ingredient('Yeast', 0.3125, 0.5, 0.0625)),
                         (560, Ingredient('Water', 0, 0, 0))])
print(bread.ingredients)
print(bread.get_nutrition())
!cat Wk05-wsgi.py
class Ingredient(object):
    """An ingredient with flexible nutritional information.

    Nutrients can be supplied either as a single dict
    (``Ingredient('Egg', {'carbs': ..., 'protein': ...})``), whose keys all
    become attributes, or as 3-4 positional numbers interpreted in order as
    carbs, protein, fat and (optionally) cholesterol.
    """
    # BUG FIX: the docstrings in this class had lost their quotes (bare
    # text lines), which made the original block a SyntaxError.

    # Order in which bare numeric arguments are interpreted.
    _NUTRIENT_ORDER = ('carbs', 'protein', 'fat', 'cholesterol')

    def __init__(self, name, *args, **kwargs):
        self.name = name
        self.nums = []  # numeric args collected so far
        for a in args:
            if isinstance(a, dict):
                # Dict form: every key/value pair becomes an attribute.
                for key, value in a.items():
                    setattr(self, key, value)
            elif isinstance(a, (int, float)):
                # Positional numeric form: collect, then bind once we have
                # carbs/protein/fat (3) or all four nutrients (4).
                # (The original had two identical branches for int/float.)
                self.nums.append(a)
                if len(self.nums) in (3, 4):
                    for attr, val in zip(self._NUTRIENT_ORDER, self.nums):
                        setattr(self, attr, val)
            else:
                print('Need correct nutritional information format')

    def __repr__(self):
        if getattr(self, 'cholesterol', False):
            return 'Ingredient({0}, {1}, {2}, {3}, {4})'.format(
                self.name, self.carbs, self.protein, self.fat, self.cholesterol)
        return 'Ingredient({0}, {1}, {2}, {3})'.format(
            self.name, self.carbs, self.protein, self.fat)

    def get_nutrition(self):
        """Return (carbs, protein, fat, cholesterol).

        Raises AttributeError when cholesterol was never supplied.
        """
        # BUG FIX: was `self.cholestrol` (typo), which always raised
        # AttributeError even when cholesterol had been set.
        return (self.carbs, self.protein, self.fat, self.cholesterol)

    def get_name(self):
        """Return the ingredient name."""
        return self.name
class Recipe(object):
    """A named recipe whose ingredients may be Ingredients or other Recipes.

    The single positional argument after *name* is a list of
    (amount, Ingredient-or-Recipe) pairs.
    """
    # BUG FIX: the docstrings in this class had lost their quotes (bare
    # text lines), which made the original block a SyntaxError.

    def __init__(self, name, *ingredients):
        self.name = name
        self.ingredients = [*ingredients][0]  # first positional arg: the pair list
        self.number = len(*ingredients)       # number of ingredient entries
        self.nutrition_ = {'carbs': 0, 'protein': 0, 'fat': 0, 'cholesterol': 0}

    def __repr__(self):
        return 'Recipe({0}, {1})'.format(self.name, self.ingredients)

    def get_nutrition(self):
        """Return total [carbs, protein, fat, cholesterol] for the recipe.

        Plain ingredients are scaled by their amount; nested Recipe
        ingredients (which have no .carbs attribute) are handled by
        recursing through their own get_nutrition().
        """
        nutrition = [0, 0, 0, 0]
        for amount, ingredient in self.ingredients:
            try:
                if getattr(ingredient, 'cholesterol', False):
                    nutrition[0] += amount * ingredient.carbs
                    nutrition[1] += amount * ingredient.protein
                    nutrition[2] += amount * ingredient.fat
                    nutrition[3] += amount * ingredient.cholesterol
                else:
                    nutrition[0] += amount * ingredient.carbs
                    nutrition[1] += amount * ingredient.protein
                    nutrition[2] += amount * ingredient.fat
            except AttributeError:
                # Nested Recipe: recurse and scale its totals by `amount`.
                nu = [amount * x for x in ingredient.get_nutrition()]
                nutrition = [tot + part for tot, part in zip(nutrition, nu)]
        return nutrition

    @property
    def nutrition(self):
        """Nutrition totals as a dict (recomputed on each access)."""
        facts = self.get_nutrition()
        for key, value in zip(('carbs', 'protein', 'fat', 'cholesterol'), facts):
            self.nutrition_[key] = value
        return self.nutrition_

    def get_name(self):
        """Return the recipe name."""
        return self.name
# Exercise driver: build bread, a dict-based egg ingredient, and a recipe
# that nests another recipe; the inline comments give expected outputs.
bread = Recipe('Bread', [(820, Ingredient('Flour', 0.77, 0.10, 0.01)),
                         (30, Ingredient('Oil', 0, 0, 1)),
                         (36, Ingredient('Sugar', 1, 0, 0)),
                         (7, Ingredient('Yeast', 0.3125, 0.5, 0.0625)),
                         (560, Ingredient('Water', 0, 0, 0))])
print(bread.ingredients)
# Should be roughly [(820, Ingredient(Flour, 0.77, 0.1, 0.01)), (30, Ingredient(Oil, 0, 0, 1)),
# (36, Ingredient(Sugar, 1, 0, 0)), (7, Ingredient(Yeast, 0.3125, 0.5, 0.0625)), (560, Ingredient(Water, 0, 0, 0))]
print(bread.nutrition)
#Should be roughly {'carbs': 669.5875, 'protein': 85.5, 'fat': 38.6375} the order is not important
eggs = Ingredient('Egg', {'carbs': 0.0077, 'protein': 0.1258, 'fat': 0.0994, 'cholesterol': 0.00423, 'awesome':100})
#eggs = Ingredient('Egg', {'carbs': 0.0077, 'protein': 0.1258, 'fat': 0.0994})
#eggs = Ingredient('Egg', 0.0077, 0.1258, 0.0994, 0.00423)
print(eggs)
#Points to note:
# - The different call to Ingredient, you can use isinstance or type to change the
#   behaviour depending on the arguments supplied
# - Cholesterol as an extra nutrient, your implementation should accept any nutrient
# - Use of Recipe (bread) as an ingredient
basic_french_toast = Recipe('Basic French Toast', [(300, Ingredient('Egg', {'carbs': 0.0077, 'protein': 0.1258,
                                                                            'fat': 0.0994, 'cholesterol': 0.00423})),
                                                   (0.25, bread)])
print(basic_french_toast.ingredients)
# Should be roughly:
# [(300, Ingredient(Egg, 0.0077, 0.1258, 0.0994)), (0.25, Recipe(Bread, [(820, Ingredient(Flour, 0.77, 0.1, 0.01)),
# (30, Ingredient(Oil, 0, 0, 1)), (36, Ingredient(Sugar, 1, 0, 0)), (7, Ingredient(Yeast, 0.3125, 0.5, 0.0625)),
# (560, Ingredient(Water, 0, 0, 0))]))]
# Note the formatting for the Recipe object, a __repr__ method will be needed
print(basic_french_toast.nutrition)
# Should be roughly {'protein': 59.115, 'carbs': 169.706875, 'cholesterol': 1.2690000000000001, 'fat': 39.479375000000005}
# The order is not important
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: Composition
Step6: This has the basic functionality implemented but there are some improvements we can make.
Step7: The API documentation for requests
Step12: The API documentation for the DataFrame object.
Step17: Viewing the ingredients now looks much better. Let's now look at the get_nutrition method.
Step18: WSGI
Step24: Assignments
|
7,126
|
<ASSISTANT_TASK:>
Python Code:
%reload_ext watermark
%watermark -p networkx
import networkx as nx
from networkx.algorithms.community import k_clique_communities, girvan_newman
import matplotlib.pyplot as plt
%matplotlib inline
# Load the Grey's Anatomy relationship graph and run one iteration of the
# Girvan-Newman edge-betweenness community algorithm (it is a generator).
GA = nx.read_gexf('../data/ga_graph.gexf')
gn_comm = girvan_newman(GA)
first_iteration_comm = tuple(sorted(c) for c in next(gn_comm))
dict(enumerate(first_iteration_comm))
def map_communities(G, communities):
    """Return a mapping of community membership from a community set tuple.

    Nodes absent from every community map to None; a node appearing in
    several communities is assigned the last one that contains it.
    """
    # BUG FIX: the docstring above had lost its quotes in the original,
    # leaving a bare text line that was a SyntaxError.
    community_map = {}
    for node in G.nodes():
        for i, comm in enumerate(communities):
            if node in comm:
                community_map[node] = i
        if community_map.get(node, None) is None:
            community_map[node] = None
    return community_map
from helpers import create_color_map
# Colour the GA graph by the first Girvan-Newman split, then by the second.
community_map = map_communities(GA, first_iteration_comm)
nx.set_node_attributes(GA, 'community', community_map)
node_colors, color_map, palette = create_color_map(GA, 'community')
nx.draw(GA, node_color=node_colors, with_labels=True)
second_comm = tuple(sorted(c) for c in next(gn_comm))
community_map_2 = map_communities(GA, second_comm)
nx.set_node_attributes(GA, 'community two', community_map_2)
node_colors, color_map, palette = create_color_map(GA, 'community two')
nx.draw(GA, node_color=node_colors, with_labels=True)
# k-clique percolation at k=2 and k=3 for comparison.
k_clique = k_clique_communities(GA, 2)
dict(enumerate(k_clique))
k_clique = k_clique_communities(GA, 3)
dict(enumerate(k_clique))
print("Percent of ALL edges that could exist: %0.2f" % (nx.density(GA) * 100))
# Repeat both algorithms on Zachary's karate club graph.
Karate = nx.karate_club_graph()
gn_comm = girvan_newman(Karate)
first_comm = tuple(sorted(c) for c in next(gn_comm))
community_map = map_communities(Karate, first_comm)
nx.set_node_attributes(Karate, 'community gn', community_map)
node_colors, color_map, palette = create_color_map(Karate, 'community gn')
nx.draw(Karate, node_color=node_colors, with_labels=True)
k_clique = k_clique_communities(Karate, 3)
k_clique_comm = [list(community) for community in k_clique]
community_map = map_communities(Karate, k_clique_comm)
nx.set_node_attributes(Karate, 'community k-clique', community_map)
node_colors, color_map, palette = create_color_map(Karate, 'community k-clique')
nx.draw(Karate, node_color=node_colors, with_labels=True)
# Cross-tabulate detected communities against the known club split.
import pandas as pd
club_community = [Karate.node[node] for node in Karate.nodes()]
club_df = pd.DataFrame(club_community)
pd.crosstab(club_df['club'], club_df['community gn'])
pd.crosstab(club_df['club'], club_df['community k-clique'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Girvan Newman Algorithm
Step3: K-Clique Communities
Step4: Karate Club Time
Step5: Validation
|
7,127
|
<ASSISTANT_TASK:>
Python Code:
!pip install --user apache-beam[gcp]
import os
import googleapiclient.discovery
import shutil
from google.cloud import bigquery
from matplotlib import pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import Dense, DenseFeatures
from tensorflow.keras.models import Sequential
print(tf.__version__)
# GCP configuration; exported to os.environ so later bash cells can use it.
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# For Bash Code
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
# Create the `taxifare` dataset and its `traffic_realtime` table (id/time
# schema), treating "already exists" as success.
bq = bigquery.Client()
dataset = bigquery.Dataset(bq.dataset("taxifare"))
try:
    bq.create_dataset(dataset) # will fail if dataset already exists
    print("Dataset created.")
except:
    # NOTE(review): bare except also swallows auth/network errors --
    # catching google.api_core.exceptions.Conflict would be safer.
    print("Dataset already exists.")
dataset = bigquery.Dataset(bq.dataset("taxifare"))
table_ref = dataset.table("traffic_realtime")
SCHEMA = [
    bigquery.SchemaField("trips_last_5min", "INTEGER", mode="REQUIRED"),
    bigquery.SchemaField("time", "TIMESTAMP", mode="REQUIRED"),
]
table = bigquery.Table(table_ref, schema=SCHEMA)
try:
    bq.create_table(table)
    print("Table created.")
except:
    # NOTE(review): same bare-except caveat as above.
    print("Table already exists.")
%load_ext google.cloud.bigquery
%%bigquery
SELECT
*
FROM
`taxifare.traffic_realtime`
ORDER BY
time DESC
LIMIT 10
# TODO 2a. Write a function to take most recent entry in `traffic_realtime` table and add it to instance.
def add_traffic_last_5min(instance):
    """Add the newest `trips_last_5min` value from BigQuery to *instance*.

    Mutates the prediction-instance dict in place and returns it.
    """
    bq = bigquery.Client()
    # BUG FIX: the triple quotes around this SQL literal had been stripped,
    # leaving bare (syntactically invalid) lines; restored here.
    query_string = """
    SELECT
      *
    FROM
      `taxifare.traffic_realtime`
    ORDER BY
      time DESC
    LIMIT 1
    """
    trips = bq.query(query_string).to_dataframe()['trips_last_5min'][0]
    instance['traffic_last_5min'] = int(trips)
    return instance
# Enrich a sample instance with the realtime traffic feature.
add_traffic_last_5min(instance={'dayofweek': 4,
                                'hourofday': 13,
                                'pickup_longitude': -73.99,
                                'pickup_latitude': 40.758,
                                'dropoff_latitude': 41.742,
                                'dropoff_longitude': -73.07})
# TODO 2b. Write code to call prediction on instance using realtime traffic info.
#Hint: Look at the "Serving online predictions" section of this page https://cloud.google.com/ml-engine/docs/tensorflow/custom-prediction-routine-keras
MODEL_NAME = 'taxifare'
VERSION_NAME = 'traffic'
# Build the AI Platform prediction service client and the full version path.
service = googleapiclient.discovery.build('ml', 'v1', cache_discovery=False)
name = 'projects/{}/models/{}/versions/{}'.format(PROJECT,
                                                  MODEL_NAME,
                                                  VERSION_NAME)
instance = add_traffic_last_5min({'dayofweek': 4,
                                  'hourofday': 13,
                                  'pickup_longitude': -73.99,
                                  'pickup_latitude': 40.758,
                                  'dropoff_latitude': 41.742,
                                  'dropoff_longitude': -73.07})
# Request an online prediction for the single enriched instance.
response = service.projects().predict(
    name=name,
    body={'instances': [instance]}
).execute()
if 'error' in response:
    raise RuntimeError(response['error'])
else:
    print(response['predictions'][0]['output_1'][0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Restart the kernel before proceeding further (On the Notebook menu - Kernel - Restart Kernel).
Step2: Re-train our model with trips_last_5min feature
Step3: Next, we create a table called traffic_realtime and set up the schema.
Step4: Launch Streaming Dataflow Pipeline
Step6: Make predictions from the new data
Step7: The traffic_realtime table is updated in realtime using Cloud Pub/Sub and Dataflow so, if you run the cell below periodically, you should see the traffic_last_5min feature added to the instance and change over time.
Step8: Finally, we'll use the python api to call predictions on an instance, using the realtime traffic information in our prediction. Just as above, you should notice that our resulting predictions change with time as our realtime traffic information changes as well.
|
7,128
|
<ASSISTANT_TASK:>
Python Code:
# NOTE(review): this cell is Python 2 (cPickle, print statements).
import gzip
import cPickle as pickle
# Load the pickled train/test/question data.
with gzip.open("../data/train.pklz", "rb") as train_file:
    train_set = pickle.load(train_file)
with gzip.open("../data/test.pklz", "rb") as test_file:
    test_set = pickle.load(test_file)
with gzip.open("../data/questions.pklz", "rb") as questions_file:
    questions = pickle.load(questions_file)
# Build feature rows [uid, qid, question_length] and targets [position].
X = []
Y = []
for key in train_set:
    # We only care about positive case at this time
    if train_set[key]['position'] < 0:
        continue
    uid = train_set[key]['uid']
    qid = train_set[key]['qid']
    pos = train_set[key]['position']
    q_length = max(questions[qid]['pos_token'].keys())
    feat = [uid, qid, q_length]
    X.append(feat)
    Y.append([pos])
print len(X)
print len(Y)
print X[0], Y[0]
# Compare 10-fold cross-validated R^2 of four linear models on the raw features.
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.cross_validation import train_test_split, cross_val_score
X_train, X_test, Y_train, Y_test = train_test_split (X, Y)
regressor = LinearRegression()
scores = cross_val_score(regressor, X, Y, cv=10)
print 'Cross validation r-squared scores:', scores.mean()
print scores
regressor = Ridge()
scores = cross_val_score(regressor, X, Y, cv=10)
print 'Cross validation r-squared scores:', scores.mean()
print scores
regressor = Lasso()
scores = cross_val_score(regressor, X, Y, cv=10)
print 'Cross validation r-squared scores:', scores.mean()
print scores
regressor = ElasticNet()
scores = cross_val_score(regressor, X, Y, cv=10)
print 'Cross validation r-squared scores:', scores.mean()
print scores
# SGD needs standardized features/targets; scale then cross-validate.
from sklearn.linear_model import SGDRegressor
from sklearn.cross_validation import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
X_scaler = StandardScaler()
Y_scaler = StandardScaler()
X_train, X_test, Y_train, Y_test = train_test_split (X, Y)
X_train = X_scaler.fit_transform(X_train)
Y_train = Y_scaler.fit_transform(Y_train)
# NOTE(review): fit_transform on the test split leaks its own statistics;
# transform() with the train-fitted scaler would be the orthodox choice.
X_test = X_scaler.fit_transform(X_test)
Y_test = Y_scaler.fit_transform(Y_test)
regressor = SGDRegressor(loss='squared_loss', penalty='l1')
scores = cross_val_score(regressor, X_train, Y_train, cv=10)
print 'Cross validation r-squared scores:', scores.mean()
print scores
# Build the same [uid, qid, q_length] features for the test set, fit the
# regressor on scaled training data, and predict in the original units.
X_test = []
test_id = []
for key in test_set:
    test_id.append(key)
    uid = test_set[key]['uid']
    qid = test_set[key]['qid']
    q_length = max(questions[qid]['pos_token'].keys())
    feat = [uid, qid, q_length]
    X_test.append(feat)
X_scaler = StandardScaler()
Y_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X)
Y_train = Y_scaler.fit_transform(Y)
X_test = X_scaler.fit_transform(X_test)
regressor.fit(X_train, Y_train)
predictions = regressor.predict(X_test)
# Undo the target scaling, then pair each prediction with its id.
predictions = Y_scaler.inverse_transform(predictions)
predictions = sorted([[id, predictions[index]] for index, id in enumerate(test_id)])
print len(predictions)
predictions[:5]
# Prepend the header row and write the submission file.
import csv
predictions.insert(0,["id", "position"])
with open('guess.csv', 'wb') as fp:
    writer = csv.writer(fp, delimiter=',')
    writer.writerows(predictions)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make training set
Step2: It means that user 0 tried to solve question number 1 which has 77 tokens for question and he or she answered at 61st token.
Step3: http
Step4: Here are 4749 predictions.
|
7,129
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_induced_power
print(__doc__)
# MNE sample analysis: source-space induced power and inter-trial coherence
# in the right auditory label, with and without the evoked response.
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_raw.fif'
fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif'
label_name = 'Aud-rh'
fname_label = meg_path / 'labels' / f'{label_name}.label'
tmin, tmax, event_id = -0.2, 0.5, 2
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Picks MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
                       stim=False, include=include, exclude='bads')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
# Load epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=reject,
                    preload=True)
# Compute a source estimate per frequency band including and excluding the
# evoked response
freqs = np.arange(7, 30, 2) # define frequencies of interest
label = mne.read_label(fname_label)
n_cycles = freqs / 3. # different number of cycle per frequency
# subtract the evoked response in order to exclude evoked activity
epochs_induced = epochs.copy().subtract_evoked()
plt.close('all')
for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
                                              ['evoked + induced',
                                               'induced only'])):
    # compute the source space power and the inter-trial coherence
    power, itc = source_induced_power(
        this_epochs, inverse_operator, freqs, label, baseline=(-0.1, 0),
        baseline_mode='percent', n_cycles=n_cycles, n_jobs=1)
    power = np.mean(power, axis=0) # average over sources
    itc = np.mean(itc, axis=0) # average over sources
    times = epochs.times
    ##########################################################################
    # View time-frequency plots
    plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)
    plt.subplot(2, 2, 2 * ii + 1)
    plt.imshow(20 * power,
               extent=[times[0], times[-1], freqs[0], freqs[-1]],
               aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r')
    plt.xlabel('Time (s)')
    plt.ylabel('Frequency (Hz)')
    plt.title('Power (%s)' % title)
    plt.colorbar()
    plt.subplot(2, 2, 2 * ii + 2)
    plt.imshow(itc,
               extent=[times[0], times[-1], freqs[0], freqs[-1]],
               aspect='auto', origin='lower', vmin=0, vmax=0.7,
               cmap='RdBu_r')
    plt.xlabel('Time (s)')
    plt.ylabel('Frequency (Hz)')
    plt.title('ITC (%s)' % title)
    plt.colorbar()
plt.show()
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
|
7,130
|
<ASSISTANT_TASK:>
Python Code:
import kfp
import kfp.gcp as gcp
import kfp.dsl as dsl
import kfp.compiler as compiler
import kfp.components as comp
import datetime
import kubernetes as k8s
# Required Parameters
PROJECT_ID='<ADD GCP PROJECT HERE>'
GCS_BUCKET='gs://<ADD STORAGE LOCATION HERE>'
# Optional Parameters, but required for running outside Kubeflow cluster
# The host for 'AI Platform Pipelines' ends with 'pipelines.googleusercontent.com'
# The host for pipeline endpoint of 'full Kubeflow deployment' ends with '/pipeline'
# Examples are:
# https://7c021d0340d296aa-dot-us-central2.pipelines.googleusercontent.com
# https://kubeflow.endpoints.kubeflow-pipeline.cloud.goog/pipeline
HOST = '<ADD HOST NAME TO TALK TO KUBEFLOW PIPELINE HERE>'
# For 'full Kubeflow deployment' on GCP, the endpoint is usually protected through IAP, therefore the following
# will be needed to access the endpoint.
CLIENT_ID = '<ADD OAuth CLIENT ID USED BY IAP HERE>'
OTHER_CLIENT_ID = '<ADD OAuth CLIENT ID USED TO OBTAIN AUTH CODES HERE>'
OTHER_CLIENT_SECRET = '<ADD OAuth CLIENT SECRET USED TO OBTAIN AUTH CODES HERE>'
# This is to ensure the proper access token is present to reach the end point for 'AI Platform Pipelines'
# If you are not working with 'AI Platform Pipelines', this step is not necessary
! gcloud auth print-access-token
# Create kfp client
# Detect whether this notebook runs inside the Kubeflow cluster: loading the
# in-cluster Kubernetes config only succeeds from a pod on the cluster.
in_cluster = True
try:
    k8s.config.load_incluster_config()
except:
    in_cluster = False
    pass
if in_cluster:
    # Inside the cluster the SDK can discover the pipeline endpoint itself.
    client = kfp.Client()
else:
    if HOST.endswith('googleusercontent.com'):
        # 'AI Platform Pipelines' hosts are not behind IAP, so no OAuth
        # client information is needed.
        CLIENT_ID = None
        OTHER_CLIENT_ID = None
        OTHER_CLIENT_SECRET = None
    client = kfp.Client(host=HOST,
                        client_id=CLIENT_ID,
                        other_client_id=OTHER_CLIENT_ID,
                        other_client_secret=OTHER_CLIENT_SECRET)
%%bash
# Create folders if they don't exist.
mkdir -p tmp/reuse_components/mnist_training
# Create the Python file that lists GCS blobs.
cat > ./tmp/reuse_components/mnist_training/app.py <<HERE
import argparse
from datetime import datetime
import tensorflow as tf
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_file', type=str, required=True, help='Name of the model file.')
parser.add_argument(
'--bucket', type=str, required=True, help='GCS bucket name.')
args = parser.parse_args()
bucket=args.bucket
model_file=args.model_file
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
print(model.summary())
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
callbacks = [
tf.keras.callbacks.TensorBoard(log_dir=bucket + '/logs/' + datetime.now().date().__str__()),
# Interrupt training if val_loss stops improving for over 2 epochs
tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),
]
model.fit(x_train, y_train, batch_size=32, epochs=5, callbacks=callbacks,
validation_data=(x_test, y_test))
model.save(model_file)
from tensorflow import gfile
gcs_path = bucket + "/" + model_file
if gfile.Exists(gcs_path):
gfile.Remove(gcs_path)
gfile.Copy(model_file, gcs_path)
with open('/output.txt', 'w') as f:
f.write(gcs_path)
HERE
%%bash
# Create Dockerfile.
cat > ./tmp/reuse_components/mnist_training/Dockerfile <<EOF
FROM tensorflow/tensorflow:1.15.0-py3
WORKDIR /app
COPY . /app
EOF
IMAGE_NAME="mnist_training_kf_pipeline"
TAG="latest" # "v_$(date +%Y%m%d_%H%M%S)"
GCR_IMAGE="gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}".format(
PROJECT_ID=PROJECT_ID,
IMAGE_NAME=IMAGE_NAME,
TAG=TAG
)
APP_FOLDER='./tmp/reuse_components/mnist_training/'
# In the following, for the purpose of demonstration
# Cloud Build is choosen for 'AI Platform Pipelines'
# kaniko is choosen for 'full Kubeflow deployment'
if HOST.endswith('googleusercontent.com'):
# kaniko is not pre-installed with 'AI Platform Pipelines'
import subprocess
# ! gcloud builds submit --tag ${IMAGE_NAME} ${APP_FOLDER}
cmd = ['gcloud', 'builds', 'submit', '--tag', GCR_IMAGE, APP_FOLDER]
build_log = (subprocess.run(cmd, stdout=subprocess.PIPE).stdout[:-1].decode('utf-8'))
print(build_log)
else:
if kfp.__version__ <= '0.1.36':
# kfp with version 0.1.36+ introduce broken change that will make the following code not working
import subprocess
builder = kfp.containers._container_builder.ContainerBuilder(
gcs_staging=GCS_BUCKET + "/kfp_container_build_staging"
)
kfp.containers.build_image_from_working_dir(
image_name=GCR_IMAGE,
working_dir=APP_FOLDER,
builder=builder
)
else:
raise("Please build the docker image use either [Docker] or [Cloud Build]")
image_name = GCR_IMAGE
%%bash -s "{image_name}"
GCR_IMAGE="${1}"
echo ${GCR_IMAGE}
# Create Yaml
# the image uri should be changed according to the above docker image push output
cat > mnist_component.yaml <<HERE
name: Mnist training
description: Train a mnist model and save to GCS
inputs:
- name: model_file
description: 'Name of the model file.'
type: String
- name: bucket
description: 'GCS bucket name.'
type: String
outputs:
- name: model_path
description: 'Trained model path.'
type: GCSPath
implementation:
container:
image: ${GCR_IMAGE}
command: [
python, /app/app.py,
--model_file, {inputValue: model_file},
--bucket, {inputValue: bucket},
]
fileOutputs:
model_path: /output.txt
HERE
import os
mnist_train_op = kfp.components.load_component_from_file(os.path.join('./', 'mnist_component.yaml'))
mnist_train_op.component_spec
# Define the pipeline
@dsl.pipeline(
    name='Mnist pipeline',
    description='A toy pipeline that performs mnist model training.'
)
def mnist_reuse_component_pipeline(
    model_file: str = 'mnist_model.h5',
    bucket: str = GCS_BUCKET
):
    """Single-step pipeline: train the MNIST model and save it to GCS.

    model_file -- name of the saved Keras model file.
    bucket -- GCS bucket (gs://...) the trained model is written to.
    The step mounts the 'user-gcp-sa' secret so it can write to the bucket.
    """
    mnist_train_op(model_file=model_file, bucket=bucket).apply(gcp.use_gcp_secret('user-gcp-sa'))
    return True
pipeline_func = mnist_reuse_component_pipeline
experiment_name = 'minist_kubeflow'
arguments = {"model_file":"mnist_model.h5",
"bucket":GCS_BUCKET}
run_name = pipeline_func.__name__ + ' run'
# Submit pipeline directly from pipeline function
run_result = client.create_run_from_pipeline_func(pipeline_func,
experiment_name=experiment_name,
run_name=run_name,
arguments=arguments)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create client
Step2: Writing the program code
Step3: Create a Docker container
Step4: Build docker image
Step5: If you want to use docker to build the image
Step6: Writing your component definition file
Step7: Create your workflow as a Python function
Step8: Submit a pipeline run
|
7,131
|
<ASSISTANT_TASK:>
Python Code:
test_data_df.head()
train_data_df.Sentiment.value_counts()
import numpy as np
np.mean([len(s.split(" ")) for s in train_data_df.Text])
import re, nltk
from sklearn.feature_extraction.text import CountVectorizer
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
def stem_tokens(tokens, stemmer):
    """Return the stem of every token, in order, using the given stemmer."""
    return [stemmer.stem(token) for token in tokens]
def tokenize(text):
    """Tokenize *text* into stemmed word tokens.

    Non-letter characters are replaced by spaces, the remainder is split
    into word tokens with NLTK, and each token is reduced to its stem.
    """
    # remove non letters
    letters_only = re.sub("[^a-zA-Z]", " ", text)
    # tokenize
    words = nltk.word_tokenize(letters_only)
    # stem
    return stem_tokens(words, stemmer)
########
vectorizer = CountVectorizer(
analyzer = 'word',
tokenizer = tokenize,
lowercase = True,
stop_words = 'english',
max_features = 100
)
corpus_data_features = vectorizer.fit_transform(train_data_df.Text.tolist() + test_data_df.Text.tolist())
corpus_data_features_nd = corpus_data_features.toarray()
corpus_data_features_nd.shape
vocab = vectorizer.get_feature_names()
print(vocab)
dist = np.sum(corpus_data_features_nd, axis=0)
for tag, count in zip(vocab, dist):
print(count, tag)
from sklearn.cross_validation import train_test_split
# remember that corpus_data_features_nd contains all of our original train and test data, so we need to exclude
# the unlabeled test entries
X_train, X_test, y_train, y_test = train_test_split(
corpus_data_features_nd[0:len(train_data_df)],
train_data_df.Sentiment,
train_size=0.85,
random_state=1234)
print(X_test[0])
from sklearn.linear_model import LogisticRegression
log_model = LogisticRegression()
log_model = log_model.fit(X=X_train, y=y_train)
y_pred = log_model.predict(X_test)
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
log_model = LogisticRegression()
log_model = log_model.fit(X=corpus_data_features_nd[0:len(train_data_df)], y=train_data_df.Sentiment)
test_pred = log_model.predict(corpus_data_features_nd[len(train_data_df):])
actual_pred = test_data_df["Sentiment"].tolist()
print(classification_report(test_pred, actual_pred))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's count how many labels do we have for each sentiment class.
Step2: Finally, let's calculate the average number of words per sentence. We could do the following using a list comprehension with the number of words per sentence.
Step3: First we need to init the vectorizer. We need to remove puntuations, lowercase, remove stop words, and stem words. All these steps can be directly performed by CountVectorizer if we pass the right parameter values. We can do as follows.
Step4: Numpy arrays are easy to work with, so convert the result to an array.
Step5: We can also print the counts of each word in the vocabulary as follows.
Step6: A bag-of-words linear classifier
Step7: Now we are ready to train our classifier.
Step8: Now we use the classifier to label our evaluation set. We can use either predict for classes or predict_proba for probabilities.
Step9: Finally, we can re-train our model with all the training data and use it for sentiment classification with the original (unlabeled) test set.
|
7,132
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact
def char_probs(s):
    """Find the probabilities of the unique characters in the string s.

    Parameters
    ----------
    s : str
        A string of characters.

    Returns
    -------
    probs : dict
        A dictionary whose keys are the unique characters in s and whose
        values are the probabilities (relative frequencies) of those
        characters. An empty string yields an empty dict.
    """
    n = len(s)
    # Count each distinct character once and normalize by the string length.
    # (The original looped over every character, re-counting duplicates, and
    # kept a redundant `if n in s` test that was always true.)
    return {ch: s.count(ch) / n for ch in set(s)}
test1 = char_probs('aaaa')
assert np.allclose(test1['a'], 1.0)
test2 = char_probs('aabb')
assert np.allclose(test2['a'], 0.5)
assert np.allclose(test2['b'], 0.5)
test3 = char_probs('abcd')
assert np.allclose(test3['a'], 0.25)
assert np.allclose(test3['b'], 0.25)
assert np.allclose(test3['c'], 0.25)
assert np.allclose(test3['d'], 0.25)
def entropy(d):
    """Compute the Shannon entropy of a dict d whose values are probabilities.

    Parameters
    ----------
    d : dict
        Maps outcomes to their probabilities; values should sum to 1.

    Returns
    -------
    H : float
        The entropy H = -sum_i P_i * log2(P_i), in bits.
    """
    # Bug fixes vs. the original: `np.array(d.values)` wrapped the bound
    # method instead of calling it, and `-max(np.cumsum(np.dot(c, np.log2(c))))`
    # is not the entropy formula (np.dot of two 1-D vectors is already a scalar).
    probs = np.array(list(d.values()), dtype=float)
    # Drop zero probabilities: lim p->0 of p*log2(p) is 0, and log2(0) would
    # otherwise emit a warning and produce -inf.
    probs = probs[probs > 0]
    return -np.sum(probs * np.log2(probs))
entropy({'a':0.5,'b':0.5})
f={'a':0.5,'b':0.5}
f.values
ff = np.array(f)
ff
assert np.allclose(entropy({'a': 0.5, 'b': 0.5}), 1.0)
assert np.allclose(entropy({'a': 1.0}), 0.0)
interact(entropy(char_probs), s='Type string here')
assert True # use this for grading the pi digits histogram
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Character counting and entropy
Step4: The entropy is a quantitative measure of the disorder of a probability distribution. It is used extensively in Physics, Statistics, Machine Learning, Computer Science and Information Science. Given a set of probabilities $P_i$, the entropy is defined as
Step5: Use IPython's interact function to create a user interface that allows you to type a string into a text box and see the entropy of the character probabilities of the string.
|
7,133
|
<ASSISTANT_TASK:>
Python Code:
from siphon.simplewebservice.ndbc import NDBC
data_types = NDBC.buoy_data_types('46042')
print(data_types)
df = NDBC.realtime_observations('46042')
df.tail()
df = df.dropna(axis='columns', how='all')
df.head()
# Your code goes here
# supl_obs =
# %load solutions/get_obs.py
import pandas as pd
idx = df.time >= (pd.Timestamp.utcnow() - pd.Timedelta(days=7))
df = df[idx]
df.head()
df.reset_index(drop=True, inplace=True)
df.head()
# Convention for import of the pyplot interface
import matplotlib.pyplot as plt
# Set-up to have matplotlib use its support for notebook inline plots
%matplotlib inline
plt.rc('font', size=12)
fig, ax = plt.subplots(figsize=(10, 6))
# Specify how our lines should look
ax.plot(df.time, df.wind_speed, color='tab:orange', label='Windspeed')
# Same as above
ax.set_xlabel('Time')
ax.set_ylabel('Speed (m/s)')
ax.set_title('Buoy Wind Data')
ax.grid(True)
ax.legend(loc='upper left');
# Helpers to format and locate ticks for dates
from matplotlib.dates import DateFormatter, DayLocator
# Set the x-axis to do major ticks on the days and label them like '07/20'
ax.xaxis.set_major_locator(DayLocator())
ax.xaxis.set_major_formatter(DateFormatter('%m/%d'))
fig
# Use linestyle keyword to style our plot
ax.plot(df.time, df.wind_gust, color='tab:olive', linestyle='--',
label='Wind Gust')
# Redisplay the legend to show our new wind gust line
ax.legend(loc='upper left')
fig
# Your code goes here
# %load solutions/basic_plot.py
# plot pressure data on same figure
ax.plot(df.time, df.pressure, color='black', label='Pressure')
ax.set_ylabel('Pressure')
ax.legend(loc='upper left')
fig
fig, ax = plt.subplots(figsize=(10, 6))
axb = ax.twinx()
# Same as above
ax.set_xlabel('Time')
ax.set_ylabel('Speed (m/s)')
ax.set_title('Buoy Data')
ax.grid(True)
# Plotting on the first y-axis
ax.plot(df.time, df.wind_speed, color='tab:orange', label='Windspeed')
ax.plot(df.time, df.wind_gust, color='tab:olive', linestyle='--', label='Wind Gust')
ax.legend(loc='upper left');
# Plotting on the second y-axis
axb.set_ylabel('Pressure (hPa)')
axb.plot(df.time, df.pressure, color='black', label='pressure')
ax.xaxis.set_major_locator(DayLocator())
ax.xaxis.set_major_formatter(DateFormatter('%b %d'))
fig, ax = plt.subplots(figsize=(10, 6))
axb = ax.twinx()
# Same as above
ax.set_xlabel('Time')
ax.set_ylabel('Speed (m/s)')
ax.set_title('Buoy 41056 Wind Data')
ax.grid(True)
# Plotting on the first y-axis
ax.plot(df.time, df.wind_speed, color='tab:orange', label='Windspeed')
ax.plot(df.time, df.wind_gust, color='tab:olive', linestyle='--', label='Wind Gust')
# Plotting on the second y-axis
axb.set_ylabel('Pressure (hPa)')
axb.plot(df.time, df.pressure, color='black', label='pressure')
ax.xaxis.set_major_locator(DayLocator())
ax.xaxis.set_major_formatter(DateFormatter('%b %d'))
# Handling of getting lines and labels from all axes for a single legend
lines, labels = ax.get_legend_handles_labels()
lines2, labels2 = axb.get_legend_handles_labels()
axb.legend(lines + lines2, labels + labels2, loc='upper left');
# Your code goes here
# %load solutions/adv_plot.py
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this case, we'll just stick with the standard meteorological data. The "realtime" data from NDBC contains approximately 45 days of data from each buoy. We'll retrieve that record for buoy 46042 and then do some cleaning of the data.
Step2: Let's get rid of the columns with all missing data. We could use the drop method and manually name all of the columns, but that would require us to know which are all NaN and that sounds like manual labor - something that programmers hate. Pandas has the dropna method that allows us to drop rows or columns where any or all values are NaN. In this case, let's drop all columns with all NaN values.
Step3: <div class="alert alert-success">
Step4: Solution
Step5: Finally, we need to trim down the data. The file contains 45 days worth of observations. Let's look at the last week's worth of data.
Step6: We're almost ready, but now the index column is not that meaningful. It starts at a non-zero row, which is fine with our initial file, but let's re-zero the index so we have a nice clean data frame to start with.
Step7: <a href="#top">Top</a>
Step8: We'll start by plotting the windspeed observations from the buoy.
Step9: Our x axis labels look a little crowded - let's try only labeling each day in our time series.
Step10: Now we can add wind gust speeds to the same plot as a dashed yellow line.
Step11: <div class="alert alert-success">
Step12: Solution
Step13: <a href="#top">Top</a>
Step14: That is less than ideal. We can't see detail in the data profiles! We can create a twin of the x-axis and have a secondary y-axis on the right side of the plot. We'll create a totally new figure here.
Step15: We're closer, but the data are plotting over the legend and not included in the legend. That's because the legend is associated with our primary y-axis. We need to append that data from the second y-axis.
Step16: <div class="alert alert-success">
Step17: Solution
|
7,134
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import sys
sys.path.append('../..')
from matplotlib import pylab
pylab.rcParams['figure.figsize'] = 16, 10
import functools
import numpy
import scipy
import scipy.special
import time
from crocodile.clean import *
from crocodile.synthesis import *
from crocodile.simulate import *
from util.visualize import *
from arl.test_support import create_named_configuration
vlas = create_named_configuration('VLAA')
ha_range = numpy.arange(numpy.radians(0),
numpy.radians(90),
numpy.radians(90 / 36))
dec = numpy.radians(45)
vobs = xyz_to_baselines(vlas.data['xyz'], ha_range, dec)
# Wavelength: 5 metres
wvl=5
uvw = vobs / wvl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
ax = plt.figure().add_subplot(121, projection='3d')
ax.scatter(uvw[:,0], uvw[:,1] , uvw[:,2])
max_uvw = numpy.amax(uvw)
ax.set_xlabel('U [$\lambda$]'); ax.set_xlim((-max_uvw, max_uvw))
ax.set_ylabel('V [$\lambda$]'); ax.set_ylim((-max_uvw, max_uvw))
ax.set_zlabel('W [$\lambda$]'); ax.set_zlim((-max_uvw, max_uvw))
ax.view_init(20, 20)
pylab.show()
import itertools
vis = numpy.zeros(len(uvw), dtype=complex)
for u,v in itertools.product(range(-3, 4), range(-3, 4)):
vis += 1.0*simulate_point(uvw, 0.010*u, 0.010*v)
plt.clf()
uvdist=numpy.sqrt(uvw[:,0]**2+uvw[:,1]**2)
plt.plot(uvdist, numpy.abs(vis), '.', color='r')
# Imaging parameterisation
theta = 2*0.05
lam = 18000
wstep = 100
npixkern = 31
grid_size = int(numpy.ceil(theta*lam))
# Determine weights (globally)
wt = doweight(theta, lam, uvw, numpy.ones(len(uvw)))
# Depending on algorithm we are going to prefer different uvw-distributions,
# so make decision about conjugation of visibilities flexible.
def flip_conj(where):
# Conjugate visibility. This does not change its meaning.
uvw[where] = -uvw[where]
vis[where] = numpy.conj(vis[where])
# Determine w-planes
wplane = numpy.around(uvw[:,2] / wstep).astype(int)
return uvw, vis, numpy.arange(numpy.min(wplane), numpy.max(wplane)+1), wplane
image_sum = numpy.zeros((grid_size, grid_size), dtype=complex)
w_grids = {}
uvw,vis,wplanes,wplane = flip_conj(uvw[:,2] < 0.0)
start_time = time.time()
for wp in wplanes:
# Filter out w-plane
puvw = uvw[wplane == wp]
if len(puvw) == 0: continue
pvis = vis[wplane == wp]
pwt = wt[wplane == wp]
midw = numpy.mean(puvw[:,2])
print("w-plane %d: %d visibilities, %.1f average w" % (wp, len(puvw), midw))
# Translate w-coordinate (not needed for simple imaging though)
#puvw = numpy.array(puvw)
#puvw[:,2] -= midw
src = numpy.ndarray((len(pvis), 0))
# Make image
cdrt = simple_imaging(theta, lam, puvw, src, pvis * pwt)
l,m = theta*coordinates2(grid_size)
# Multiply by Fresnel pattern in image space, add
wkern = w_kernel_function(l, m, midw)
w_grids[wp] = ifft(cdrt) / wkern
image_sum += w_grids[wp]
print("Done in %.1fs" % (time.time() - start_time))
# We only used half of the visibilities, so the image is not going to
# end up real-valued. However, we can easily just remove the unused imaginary
# parts and multiply by 2 to arrive at the correct result.
show_image(2.0*numpy.real(image_sum), "image", theta)
start_time = time.time()
uvw,vis,wplanes,wplane = flip_conj(uvw[:,1] < 0.0)
grid_sum = numpy.zeros((grid_size, grid_size), dtype=complex)
for wp in wplanes:
# Filter out w-plane
puvw = uvw[wplane == wp]
if len(puvw) == 0: continue
pvis = vis[wplane == wp]
pwt = wt[wplane == wp]
midw = numpy.mean(puvw[:,2])
# w=0 plane? Just grid directly - skip Fresnel pattern (guaranteed to be =1) + FFTs
if abs(midw) < wstep / 2:
grid_sum += simple_imaging(theta, lam, puvw, src, pvis * pwt)
continue
# Determine uv bounds, round to grid cell
xy_min = numpy.floor(numpy.amin(puvw[:,:2], axis=0) * theta).astype(int)
xy_max = numpy.ceil(numpy.amax(puvw[:,:2], axis=0) * theta).astype(int)
# Make sure we have enough space for convolution.
xy_min -= (npixkern + 1) // 2
xy_max += npixkern // 2
xy_size = numpy.max(xy_max - xy_min)
print("w-plane %d: %d visibilities, %.1f average w, %dx%d cells" %
(wp, len(puvw), midw, xy_size, xy_size))
# Force quadratic - TODO: unneeded, strictly speaking
xy_maxq = numpy.amax([xy_max, xy_min + xy_size], axis=0)
# Determine the uvw size and mid-point
uvw_size = xy_size / theta
uvw_mid = numpy.hstack([(xy_maxq + xy_min) // 2 / theta, midw])
# Grid
pgrid = simple_imaging(theta, uvw_size, puvw - uvw_mid, src, pvis * pwt)
# Generate Fresnel pattern
l,m = theta*coordinates2(xy_size)
wkern = w_kernel_function(l, m, midw)
# Divide Fresnel pattern in image plane, then FFT right back
pgrid_w = fft(ifft(pgrid) / wkern)
# Add to original grid at offset
mid = int(lam*theta)//2
x0, y0 = mid + xy_min
x1, y1 = mid + xy_max
grid_sum[y0:y1, x0:x1] += pgrid_w[0:y1-y0, 0:x1-x0]
image_sum = ifft(grid_sum)
print("Done in %.1fs" % (time.time() - start_time))
show_image(2.0*numpy.real(image_sum), "image", theta)
uvbin_size = 256 - npixkern # Choose it so we get a nice 2^x size below
start_time = time.time()
uvw,vis,wplanes,wplane = flip_conj(uvw[:,1] < 0.0)
grid_sum = numpy.zeros((grid_size, grid_size), dtype=complex)
ubin = numpy.floor(uvw[:,0]*theta/uvbin_size).astype(int)
vbin = numpy.floor(uvw[:,1]*theta/uvbin_size).astype(int)
# Generate Fresnel pattern for shifting between two w-planes
# As this is the same between all w-planes, we can share it
# between the whole loop.
l,m = theta*coordinates2(uvbin_size + npixkern)
wkern = w_kernel_function(l, m, wstep)
for ub in range(numpy.min(ubin), numpy.max(ubin)+1):
for vb in range(numpy.min(vbin), numpy.max(vbin)+1):
# Find visibilities
bin_sel = numpy.logical_and(ubin == ub, vbin == vb)
if not numpy.any(bin_sel):
continue
# Determine bin dimensions
xy_min = uvbin_size * numpy.array([ub, vb], dtype=int)
xy_max = uvbin_size * numpy.array([ub+1, vb+1], dtype=int)
uv_min = xy_min / theta
uv_max = xy_min / theta
uv_mid = (xy_max + xy_min) // 2 / theta
# Make sure we have enough space for convolution.
xy_min -= (npixkern + 1) // 2
xy_max += npixkern // 2
assert(numpy.all(numpy.max(xy_max - xy_min) == uvbin_size+npixkern))
uvw_size = (uvbin_size+npixkern) / theta
# Make grid for uv-bin
bin_image_sum = numpy.zeros((uvbin_size+npixkern, uvbin_size+npixkern), dtype=complex)
nvis = 0; midws = []
last_wp = wplanes[0]
for wp in wplanes:
# Filter out visibilities for u/v-bin and w-plane
slc = numpy.logical_and(bin_sel, wplane == wp)
puvw = uvw[slc]
if len(puvw) == 0: continue
pvis = vis[slc]
pwt = wt[slc]
# Statistics
nvis += len(puvw)
midws.append(wp*wstep)
# w=0 plane? Just grid directly, as before
if wp == 0:
grid_sum += simple_imaging(theta, lam, puvw, src, pvis * pwt)
continue
# Bring image sum into this w-plane
if last_wp != wplanes[0]:
bin_image_sum *= wkern**(wp-last_wp)
last_wp = wp
# Grid relative to mid-point
uvw_mid = numpy.hstack([uv_mid, [wp*wstep]])
pgrid = simple_imaging(theta, uvw_size, puvw - uvw_mid, src, pvis * pwt)
# Add to bin grid
bin_image_sum += ifft(pgrid)
# No visibilities? Skip
if nvis == 0: continue
# Transfer into w=0 plane, FFT image sum
print("uv-bin %d,%d: %d visibilities, %s w-bins" % (ub, vb, nvis, numpy.array(midws, dtype=int)))
bin_image_sum /= wkern**last_wp
bin_grid = fft(bin_image_sum)
# Add to grid, keeping bounds in mind
mid = int(lam*theta)//2
x0, y0 = mid + xy_min
x1, y1 = mid + xy_max
x0b, y0b = numpy.amax([[x0, y0], [0,0]], axis=0)
x1b, y1b = numpy.amin([[x1, y1], [grid_size,grid_size]], axis=0)
grid_sum[y0b:y1b, x0b:x1b] += \
bin_grid[y0b-y0:y1b-y0, x0b-x0:x1b-x0]
image_sum = ifft(grid_sum)
print("Done in %.1fs" % (time.time() - start_time))
show_image(2.0 * numpy.real(image_sum), "image", theta)
image_show = numpy.real(image_sum)
step=int(grid_size/10)
def zoom(x, y=step): pylab.matshow(image_show[y:y+2*step,x:x+2*step]) ; pylab.colorbar(shrink=.4,pad=0.025); pylab.show()
from ipywidgets import interact
interact(zoom, x=(0,image_show.shape[0]-2*step,step), y=(0,image_show.shape[1]-2*step,step));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate baseline coordinates for an observation with the VLA over 6 hours, with a visibility recorded every 10 minutes. The phase center is fixed at a declination of 45 degrees. We assume that the imaged sky says at that position over the course of the observation.
Step2: We can now generate visibilities for these baselines by simulation. We place three sources.
Step3: Using imaging, we can now reconstruct the image. We split the visibilities into a number of w-bins
Step4: Simple w-stacking
Step5: This was the easiest version of w-stacking. Clearly a lot of w-planes are mostly empty, which is wasteful both in terms of FFT complexity and especially in terms of memory (bandwidth).
Step6: As you might notice, this is actually slower overall, because for lower w doing two FFTs per w-plane adds quite a bit of extra work.
Step7: By zooming in we can confirm output quality
|
7,135
|
<ASSISTANT_TASK:>
Python Code:
import QuantLib as ql
import matplotlib.pyplot as plt
%matplotlib inline
ql.__version__
# option data
maturity_date = ql.Date(15, 1, 2016)
spot_price = 127.62
strike_price = 130
volatility = 0.20 # the historical vols or implied vols
dividend_rate = 0.0163
option_type = ql.Option.Call
risk_free_rate = 0.001
day_count = ql.Actual365Fixed()
calendar = ql.UnitedStates()
calculation_date = ql.Date(8, 5, 2015)
ql.Settings.instance().evaluationDate = calculation_date
payoff = ql.PlainVanillaPayoff(option_type, strike_price)
settlement = calculation_date
am_exercise = ql.AmericanExercise(settlement, maturity_date)
american_option = ql.VanillaOption(payoff, am_exercise)
eu_exercise = ql.EuropeanExercise(maturity_date)
european_option = ql.VanillaOption(payoff, eu_exercise)
spot_handle = ql.QuoteHandle(
ql.SimpleQuote(spot_price)
)
flat_ts = ql.YieldTermStructureHandle(
ql.FlatForward(calculation_date, risk_free_rate, day_count)
)
dividend_yield = ql.YieldTermStructureHandle(
ql.FlatForward(calculation_date, dividend_rate, day_count)
)
flat_vol_ts = ql.BlackVolTermStructureHandle(
ql.BlackConstantVol(calculation_date, calendar, volatility, day_count)
)
bsm_process = ql.BlackScholesMertonProcess(spot_handle,
dividend_yield,
flat_ts,
flat_vol_ts)
steps = 200
binomial_engine = ql.BinomialVanillaEngine(bsm_process, "crr", steps)
american_option.setPricingEngine(binomial_engine)
print (american_option.NPV())
def binomial_price(option, bsm_process, steps):
    """Price *option* with a CRR binomial-tree engine of the given depth.

    option : ql.VanillaOption -- instrument to price; its pricing engine is
        replaced in place as a side effect.
    bsm_process : ql.BlackScholesMertonProcess -- process for the underlying.
    steps : int -- number of time steps in the binomial tree.

    Returns the option's net present value as a float.
    """
    binomial_engine = ql.BinomialVanillaEngine(bsm_process, "crr", steps)
    option.setPricingEngine(binomial_engine)
    return option.NPV()
steps = range(5, 200, 1)
eu_prices = [binomial_price(european_option, bsm_process, step) for step in steps]
am_prices = [binomial_price(american_option, bsm_process, step) for step in steps]
# theoretican European option price
european_option.setPricingEngine(ql.AnalyticEuropeanEngine(bsm_process))
bs_price = european_option.NPV()
plt.plot(steps, eu_prices, label="European Option", lw=2, alpha=0.6)
plt.plot(steps, am_prices, label="American Option", lw=2, alpha=0.6)
plt.plot([5,200],[bs_price, bs_price], "r--", label="BSM Price", lw=2, alpha=0.6)
plt.xlabel("Steps")
plt.ylabel("Price")
plt.ylim(6.7,7)
plt.title("Binomial Tree Price For Varying Steps")
plt.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us consider a European and an American call option for AAPL with a strike price of \$130 maturing on 15th Jan, 2016. Let the spot price be \$127.62. The volatility of the underlying stock is known to be 20%, and the stock has a dividend yield of 1.63%. Let's value these options as of 8th May, 2015.
Step2: We construct the European and American options here. The main difference here is in the Exercise type. One has to use AmericanExercise instead of EuropeanExercise to pass into the VanillaOption to construct an American option.
Step3: The Black-Scholes-Merton process is constructed here.
Step4: The value of the American option can be computed using a Binomial Engine using the CRR approach.
Step5: For illustration purpose, lets compare the European and American option prices using the binomial tree approach.
Step6: In the plot below, the binomial-tree approach is used to value American option for different number of steps. You can see the prices converging with increase in number of steps. The European option price is plotted along with BSM theoretical price for comparison purposes.
|
7,136
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
# only need this line for Python 2.7 ... by importing print() we also get support for unpacking within print
# * for unpacking is not recognized in this context in Python 2.7 normally
# arguments on print and behavior of print in this example is also Python 3.x which "from _future__" is importing
# assume input of 9:
N = 9
print(*range(N+1), sep='', end='')
# this larger number test is just for comparison with what follows in the attempt to do this mathematically
N = 113
print(*range(N+1), sep='', end='')
'''Initial experiment: use powers of 10, but start from the end so that we get: 1 * 10**N + 2 * 10**N-1 + ...
This idea fails however as soon as numbers get bigger than 9
This example outputs the source list generated by range() ahead of the answer just to show it. '''
def num_to_seqStr(N, showList = True):
    """Build a number by summing i * 10**(N - i) for i in 1..N.

    Optionally prints the source sequence first. Note: this power-of-ten
    placement is only digit-correct while every term is a single digit
    (N <= 9), which is exactly the limitation the surrounding text discusses.
    """
    seq = range(1, N + 1)
    if showList == True:
        print(seq)
    total = 0
    for i in seq:
        total += i * 10 ** (N - i)
    return total
for i in range(11):
print("Answer: %s" %num_to_seqStr(i))
def findTens(N):  # find the powers of 10 inside a number
    """Return the power of ten contained in N.

    Equivalent to floor(log10(N)) for N >= 1, computed with exact integer
    arithmetic; returns 0 for any N < 10, including N == 0 (matching the
    original's behavior for small inputs).

    Fix: the original kept a leftover debugging escape hatch
    (`if incrementer == 100: break`) that silently capped the result at 99
    for astronomically large N; the loop below has no such cap.
    """
    power = 0
    # Raise the exponent while the next power of ten still fits in N.
    while N >= 10 ** (power + 1):
        power += 1
    return power
findTensTests = [findTens(0),
findTens(7),
findTens(112),
findTens(13),
findTens(1009)]
findTensTests
def create_seqNum(N, reverse=False, showWork=False, returnDescr=False, divLength=100):
    '''create_seqNum() --> input N, and get back a number built from the sequence 1234...N.

    Unlike num_to_seqStr above, this handles multi-digit terms correctly by
    tracking how many decimal places each term occupies.

    Arguments: reverse=True to get the sequence in reverse (N...4321),
    showWork=True to print the numbers that add up to the final answer,
    returnDescr=True to print the answer in a sentence as well as returning
    it as a number, divLength sets the width of the printed divider lines.'''
    num = 0
    tensIncr = 0      # running count of decimal places already consumed
    answer = 0
    Ntens = findTens(N)   # widest term's power of ten, used for print padding
    modifier = 0 # modifies counter when increment of 10 occurs
    # NOTE(review): `modifier` is never used — leftover from an earlier approach.
    if reverse == True: # create range() inputs
        rstart = 1
        rend = N+1
        rinc = 1
    else:
        # Iterate N down to 1 so the lowest numbers land in the lowest places.
        rstart = N
        rend = 0
        rinc = -1
    for i in range(rstart, rend, rinc):
        itens = findTens(i)
        num = i * 10**tensIncr # how many zeroes do we need on the end of each num?
        # Advance past the digits this term occupies (1 digit plus its power of ten).
        tensIncr += 1 + itens
        pad = (Ntens - itens)   # right-pad so the printed "Add:" columns line up
        if showWork == True:
            print(("For %d" + " "*pad + "  Add: %d") %(i, num))
        answer += num
    if showWork == True:
        print("#"*divLength)
    if showWork == True or returnDescr == True:
        print("Answer: %d" %answer)
        print("#"*divLength)
    return answer
print(create_seqNum.__doc__)
for i in [1, 5, 9, 10, 11, 13, 98, 99, 100, 101, 102, 107, 1012]:
create_seqNum(i, reverse=True, returnDescr=True)
create_seqNum(i, returnDescr=True)
create_seqNum(102, showWork=True)
import math # needed for log functions
def powOfTens(N):
    """Return the power of ten of N's most significant digit.

    int() truncates the log toward zero no matter how large the fractional
    part is.  math.log10 is preferred over math.log(N, 10) because the
    two-argument form introduces floating-point rounding errors.
    Output pairs in the tests below are of the form (originalNumber, powersOfTens).
    """
    exponent = math.log10(N)
    return int(exponent)
# Pair each sample input with its computed power of ten:
# expected: [(1, 0), (7, 0), (112, 2), (13, 1), (1009, 3)]
countTensTest = [(1, powOfTens(1)),
(7, powOfTens(7)),
(112, powOfTens(112)),
(13, powOfTens(13)),
(1009, powOfTens(1009))]
countTensTest
# Compare the truncated result against the raw log10 value for a spread of inputs.
listofints = [1,2,3,9,10,11,12,19,99,100,101,102, 999, 1000, 1001, 50102030]
for i in listofints:
    print(i, powOfTens(i), math.log10(i)) # show what we are really calculating:
    # (original, function result, un-modified log)
# source: eruciform on StackOverflow
# note: powOfTens(x) is just int(math.log10(x))
import math # to get math.log
listofints = [1,2,3,9,10,11,12,19,99,100,101,102, 999, 1000, 1001, 50102030]
n = reduce(lambda x,y:[x[0]*(10**(y[1]+1))+y[0],0],map(lambda x:[x,powOfTens(x)], listofints))[0]
# to do this in one line with no external functions, replace powOfTens(x) w/:
# int(math.log10(x))
print(n)
# source: eruciform on StackOverflow
# we can also more simply crack this problem with just reduce()
n = reduce(lambda x,y:x*(10**(powOfTens(y)+1))+y,listofints)
# to do this in one line with no external functions, replace powOfTens(x) w/:
# int(math.log10(y))
print(n)
listofints = [1,2,3,9,10,11,12,19,99,100,101,102, 999, 1000, 1001, 50102030]
map(lambda x:[x,int(math.log10(x))], listofints)
reduce(lambda x,y:[x[0]*(10**(y[1]+1))+y[0],0],map(lambda x:[x,int(math.log10(x))], listofints))
# assume this is a subset from a list like this [ ... 999, 1001, 1002, ...]
# after mapping it with powOfTens() or int(math.log10(x)), the first two terms in the sample would look like this:
testVal = [[999, 2], [1001, 3]] # and it would continue with more terms
testFun = lambda x,y:[x[0]*(10**(y[1]+1))+y[0],0]
testFun(testVal[0], testVal[1])
# then, as reduce works its way up the list, the answer and the next term feed in like this:
testVal = [[9991001, 2], [1002, 3]]
testFun(testVal[0], testVal[1])
# the full list folded in one expression (no helper functions):
listofints = [1,2,3,9,10,11,12,19,99,100,101,102, 999, 1000, 1001, 50102030]
n = reduce(lambda x,y:x*(10**(int(math.log10(y))+1))+y,listofints)
print(n)
# for comparison, here is the lambda logic split in two functions from the earlier example
testFun_r = lambda x,y:[x[0]*(10**(y[1]+1))+y[0],0] # used in outer () to feed into reduce()
testFun_m = lambda x:[x,int(math.log10(x))] # used in inner () to feed into map()
# for this idea, only one lambda does it all, feeding directly into reduce():
testFun2 = lambda x,y:x*(10**(int(math.log10(y))+1))+y
# test it:
testVal = [999, 1001]
testFun2(testVal[0], testVal[1])
# now reduce() applies it across the whole original list: function(n1, n2) = result1, function(result1, n3) = result2 ...
# until it produces a final answer to return ("reducing the list down to one number").
listofints = [1,2,3,10,12,19,99,100,101,50102030]
n = reduce(lambda x,y:x*(10**(int(math.log10(y))+1))+y,listofints)
print(n)
# single hand-worked step: shift 1 left by the digit count of 2, then add 2
1*10**(int(math.log10(2))+1)+2
# this function presented and tested in earlier sections
# repeated here (unchanged) for conveninece:
import math # needed for log functions
def powOfTens(N):
    """Power of ten of N's leading digit, clamped so inputs below 1 return 0.

    math.log10 would fail (or go negative) for N < 1; clamping such inputs
    up to 1 is a work-around matching how the sequence builder uses this
    helper.  int() truncates toward zero regardless of the fractional part;
    math.log(N, 10) is avoided because of its floating-point rounding errors.
    """
    safe_N = N if N >= 1 else 1
    return int(math.log10(safe_N))
from __future__ import print_function
def create_seqNumber(N, reverse=False, showWork=False, returnDescr=False, divLength=100):
    '''create_seqNumber() --> input N, and get back a number built from the sequence of 1234...N

    N may be an int (builds the 1..N sequence) or an explicit list of ints.
    Arguments: reverse=True to get the sequence in reverse, showWork=True to see numbers that add up to final,
    returnDescr=True to print the answer in a sentence as well as returning it as a number.
    Raises TypeError for any other input type.'''
    num = 0
    tensIncr = 0
    answer = 0
    if isinstance(N, list):
        # explicit list: use as-is, or flipped for reverse
        if reverse == False:
            listofints = N
        else:
            listofints = N[::-1]
    elif isinstance(N, int):
        Ntens = powOfTens(N)
        if reverse == False: # create range builder inputs
            rstart = 1
            rend = N+1
            rinc = 1
        else:
            rstart = N
            rend = 0
            rinc = -1
        listofints = range(rstart, rend, rinc)
    else:
        print(type(N))
        raise TypeError("Error: for create_seqNumber(N), N must be a list or an integer.")
    # fold the sequence into one integer: shift the accumulator left by the
    # digit count of the next term, then add it
    answer = reduce(lambda x,y:x*(10**(powOfTens(y)+1))+y,listofints)
    if showWork == True:
        print("Show Work:")
        print("#"*divLength)
        # seed with the first pair, then append each partial concatenation
        # NOTE(review): worklist[-1] inside the comprehension is evaluated
        # before the append completes, so every entry folds onto the first
        # pair rather than the running total -- confirm against notebook output.
        worklist = [reduce(lambda x,y:x*(10**(powOfTens(y)+1))+y, listofints[0:2])]
        worklist.append([reduce(lambda a,b:a*(10**(powOfTens(b)+1))+b, [worklist[-1], x])
                         for ind, x in enumerate(listofints[2:])])
        worklist = [worklist[0]] + worklist[1]
        # print("worklist: %s" %worklist)
        # print("worklist[-1]", worklist[-1])
        NpOfT = powOfTens(worklist[-1])        # widths used only for print alignment
        NpOfT2 = powOfTens(len(worklist)-1)
        # print each numbered partial result, right-padded into columns
        [(x, print(("%d)" + " "*(NpOfT2 - powOfTens(ind)) + " "*(NpOfT - powOfTens(x) + 1) + "%s") %(ind, x)))[0]
         for ind, x in enumerate(worklist)]
        print("#"*divLength)
    if showWork == True or returnDescr == True:
        print("Answer: %d" %answer)
        print("#"*divLength)
    return answer
# Exercise the int path (forward and reverse) across edge sizes, then the
# list path, then showWork mode, ending with a deliberate TypeError demo.
create_seqNumber(15, showWork=False, returnDescr=True)
create_seqNumber(15, reverse=True, showWork=False, returnDescr=True)
create_seqNumber(102, reverse=False, showWork=False, returnDescr=True)
for i in [1, 5, 9, 10, 11, 13, 98, 99, 100, 101, 102, 107, 1012]: # returnDescr = False by default
    print("F: %s" %(create_seqNumber(i, reverse=False)))
    print("-----"*25)
    print("R: %s" %(create_seqNumber(i, reverse=True)))
    print("#####"*25)
tstLst = [1,2,3,9,10,11,12,59,99,100,101,50102030]
print("F: %s" %(create_seqNumber(tstLst, reverse=False)))
print("-----"*25)
print("R: %s" %(create_seqNumber(tstLst, reverse=True)))
print("#####"*25)
create_seqNumber(3, reverse=False, showWork=True, returnDescr=True)
create_seqNumber(13, reverse=True, showWork=True, returnDescr=True)
tstLst = [1,2,3,9,10,11,12,59,99,100,101,50102030]
create_seqNumber(tstLst, reverse=False, showWork=True, returnDescr=True)
create_seqNumber(1003, reverse=False, showWork=True, returnDescr=True)
create_seqNumber('15', showWork=False, returnDescr=True) # demo of TypeError the code raises for wrong input
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As a point of curiosity though ... how would we do it using math instead of relying on print tricks, which probably convert the numbers to strings under the covers anyway? The basic idea of the solution is that the code needs to append the right number of zeroes to each number in the sequence so that, added together, they form the "string" of numbers
Step2: Note
Step3: log() To The Rescue for Part Of The Problem
Step4: <a id="eruciform_start" name="eruciform_start"></a>
Step5: Deconstructing the code from the inside out, this is what it is doing.
Step6: Each sublist now contains [original_number, number_of_tens_in_number]. reduce() "reduces" the list to a single number by applying the lambda function fed into it. It takes each term in the list as function(n1 , n2), then the result goes back into the function as function(result, n3), then function(result2, n4) ... and so on until the entire list is consumed and one answer is spit back.
Step7: The lambda function being used to reduce it, grabs the first term of of each sublist and multiplies by 10 to the power of the second term of the next sublist + 1 + the next term. The original format of the sublist is preserved by wrapping the whole thing in [] to make it a list that is then used to replace the original [term, number_powers_of_tens] sublist in listofints. This test function can better show what is happening
Step8: Extracting just the first term is why the whole thing ended with [0] in the original code
Step9: <a id="lambdareduce" name="lambdareduce"></a>
Step10: <a id="originalProblem" name="originalProblem"></a>
Step11: Testing with showWork Off
Step12: Testing with showWork On
|
7,137
|
<ASSISTANT_TASK:>
Python Code:
from osgeo import gdal
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
# %load ../neon_aop_python_functions/raster2array.py
# raster2array.py reads in the first band of geotif file and returns an array and associated
# metadata dictionary.
# Input: raster_geotif (eg. 'raster.tif')
# Outputs:
# array_rows: # of rows in the array
# array_cols: # of columns in the array
# bands: # of bands
# driver: (for NEON data this is Geotif)
# projection:
# geotransform:
# pixelWidth: width of pixel (for NEON data this = 1)
# pixelHeight: height of pixel (for NEON data this = -1)
# ext_dict: dictionary of raster extent, containing the following information
# {'xMin': xMin_value,'xMax': xMax_value, 'yMin': yMin_value, 'yMax': yMax_value}
# Note: to extract a value from ext_dict, use the syntax: eg. xMin = metadata['ext_dict']['xMin']
# extent: raster extent values (xMin, xMax, yMin, yMax)
# noDataValue: no data value
# scaleFactor: scale factor
# band_stats: dictionary of statistics for band 1:
# {'min': min_value, 'max': max_value, 'mean': mean_value, 'stdev': stdev_value}
# Note: to extract a value from band_stats dictionary, use the syntax:
# eg. array_min = metadata['band_stats']['min']
# Usage: array, metadata = raster2array('raster.tif')
from osgeo import gdal
import numpy as np
def raster2array(geotif_file):
    """Read band 1 of a GeoTIFF into a float array plus a metadata dictionary.

    Parameters:
        geotif_file (str): path to a single-band GeoTIFF (e.g. 'raster.tif').

    Returns:
        (array, metadata): `array` is the band data with the no-data value
        replaced by NaN and the scale factor applied (when the band defines
        them); `metadata` holds rows/cols/bands, driver, projection,
        geotransform, pixel size, extent (dict and tuple forms),
        noDataValue, scaleFactor, and band statistics.
        For multi-band rasters a message is printed and nothing is returned.
    """
    metadata = {}
    dataset = gdal.Open(geotif_file)
    metadata['array_rows'] = dataset.RasterYSize
    metadata['array_cols'] = dataset.RasterXSize
    metadata['bands'] = dataset.RasterCount
    metadata['driver'] = dataset.GetDriver().LongName
    metadata['projection'] = dataset.GetProjection()
    metadata['geotransform'] = dataset.GetGeoTransform()

    mapinfo = dataset.GetGeoTransform()
    metadata['pixelWidth'] = mapinfo[1]
    metadata['pixelHeight'] = mapinfo[5]

    # raster extent in map units; yMin uses the (negative) row step in mapinfo[5]
    metadata['ext_dict'] = {}
    metadata['ext_dict']['xMin'] = mapinfo[0]
    metadata['ext_dict']['xMax'] = mapinfo[0] + dataset.RasterXSize/mapinfo[1]
    metadata['ext_dict']['yMin'] = mapinfo[3] + dataset.RasterYSize/mapinfo[5]
    metadata['ext_dict']['yMax'] = mapinfo[3]
    metadata['extent'] = (metadata['ext_dict']['xMin'], metadata['ext_dict']['xMax'],
                          metadata['ext_dict']['yMin'], metadata['ext_dict']['yMax'])

    if metadata['bands'] == 1:
        raster = dataset.GetRasterBand(1)
        metadata['noDataValue'] = raster.GetNoDataValue()
        metadata['scaleFactor'] = raster.GetScale()

        # band statistics (nested dictionary keeps the stats grouped together)
        metadata['bandstats'] = {}
        stats = raster.GetStatistics(True, True)
        metadata['bandstats']['min'] = round(stats[0], 2)
        metadata['bandstats']['max'] = round(stats[1], 2)
        metadata['bandstats']['mean'] = round(stats[2], 2)
        metadata['bandstats']['stdev'] = round(stats[3], 2)

        # np.float was removed from NumPy (1.24); builtin float is the
        # equivalent dtype spelling
        array = dataset.GetRasterBand(1).ReadAsArray(
            0, 0, metadata['array_cols'], metadata['array_rows']).astype(float)
        # GDAL returns None when the band defines no no-data value or scale
        # factor; the original code crashed (int(None) / division by None)
        if metadata['noDataValue'] is not None:
            array[array == int(metadata['noDataValue'])] = np.nan
        if metadata['scaleFactor'] is not None:
            array = array/metadata['scaleFactor']
        return array, metadata

    elif metadata['bands'] > 1:
        print('More than one band ... fix function for case of multiple bands')
# %load ../neon_aop_python_functions/plot_band_array.py
def plot_band_array(band_array, refl_extent, title, cbar_label, colormap='spectral', alpha=1):
    """Plot a 2-D raster band with a labeled colorbar on the current figure.

    Parameters:
        band_array: 2-D array of band values.
        refl_extent: (xMin, xMax, yMin, yMax) map extent for imshow.
        title (str): plot title.
        cbar_label (str): colorbar label.
        colormap (str): matplotlib colormap name.
        alpha (float): image transparency, 0-1.
    """
    # the 'spectral' colormap was removed in matplotlib 2.2; fall back to its
    # renamed equivalent so the old default keeps working on modern matplotlib
    if colormap == 'spectral' and 'spectral' not in plt.colormaps():
        colormap = 'nipy_spectral'
    plt.imshow(band_array, extent=refl_extent, alpha=alpha);
    cbar = plt.colorbar(); plt.set_cmap(colormap);
    cbar.set_label(cbar_label, rotation=270, labelpad=20)
    plt.title(title); ax = plt.gca();
    ax.ticklabel_format(useOffset=False, style='plain')  # do not use scientific notation
    rotatexlabels = plt.setp(ax.get_xticklabels(), rotation=90)  # rotate x tick labels 90 degrees
#https://github.com/rveciana/introduccion-python-geoespacial/blob/master/hillshade.py
def hillshade(array,azimuth,angle_altitude):
azimuth = 360.0 - azimuth
x, y = np.gradient(array)
slope = np.pi/2. - np.arctan(np.sqrt(x*x + y*y))
aspect = np.arctan2(-x, y)
azimuthrad = azimuth*np.pi/180.
altituderad = angle_altitude*np.pi/180.
shaded = np.sin(altituderad)*np.sin(slope) + np.cos(altituderad)*np.cos(slope)*np.cos((azimuthrad - np.pi/2.) - aspect)
return 255*(shaded + 1)/2
# Use raster2array to convert TEAK DTM Geotif to array & plot
#dtm_array, dtm_metadata = raster2array('2013_TEAK_1_326000_4103000_DTM.tif')
dtm_array, dtm_metadata = raster2array('/Users/olearyd/Git/data/2013_TEAK_1_326000_4103000_DTM.tif')
plot_band_array(dtm_array,dtm_metadata['extent'],'TEAK DTM','Elevation, m',colormap='gist_earth')
ax = plt.gca(); plt.grid('on')
# Use hillshade function on a DTM Geotiff
hs_array = hillshade(dtm_array,225,45)
plot_band_array(hs_array,dtm_metadata['extent'],'TEAK Hillshade, Aspect=225°',
'Hillshade',colormap='Greys',alpha=0.8)
ax = plt.gca(); plt.grid('on')
#Overlay transparent hillshade on DTM:
fig = plt.figure(frameon=False)
im1 = plt.imshow(dtm_array,cmap='terrain_r',extent=dtm_metadata['extent']);
cbar = plt.colorbar(); cbar.set_label('Elevation, m',rotation=270,labelpad=20)
im2 = plt.imshow(hs_array,cmap='Greys',alpha=0.8,extent=dtm_metadata['extent']); #plt.colorbar()
ax=plt.gca(); ax.ticklabel_format(useOffset=False, style='plain') #do not use scientific notation
rotatexlabels = plt.setp(ax.get_xticklabels(),rotation=90) #rotate x tick labels 90 degrees
plt.grid('on'); # plt.colorbar();
plt.title('TEAK Hillshade + DTM')
#Calculate CHM from DSM & DTM (canopy height = surface minus terrain):
dsm_array, dsm_metadata = raster2array('/Users/olearyd/Git/data/2013_TEAK_1_326000_4103000_DSM.tif')
teak_chm = dsm_array - dtm_array;
plot_band_array(teak_chm,dtm_metadata['extent'],'TEAK Canopy Height Model','Canopy Height, m',colormap='Greens')
ax = plt.gca(); plt.grid('on')
#Overlay transparent hillshade on DTM, with canopy height on top:
fig = plt.figure(frameon=False)
#Terrain
im1 = plt.imshow(dtm_array,cmap='YlOrBr',extent=dtm_metadata['extent']);
cbar1 = plt.colorbar(); cbar1.set_label('Elevation, m',rotation=270,labelpad=20)
#Hillshade
im2 = plt.imshow(hs_array,cmap='Greys',alpha=.5,extent=dtm_metadata['extent']); #plt.colorbar()
#Canopy
im3 = plt.imshow(teak_chm,cmap='Greens',alpha=0.6,extent=dtm_metadata['extent']);
cbar2 = plt.colorbar(); cbar2.set_label('Canopy Height, m',rotation=270,labelpad=20)
ax=plt.gca(); ax.ticklabel_format(useOffset=False, style='plain') #do not use scientific notation
rotatexlabels = plt.setp(ax.get_xticklabels(),rotation=90) #rotate x tick labels 90 degrees
plt.grid('on'); # plt.colorbar();
plt.title('TEAK 2013 \n Terrain, Hillshade, & Canopy Height')
#Importing the TEAK CHM Geotiff resulted in v. sparse data ?
chm_array, chm_metadata = raster2array('/Users/olearyd/Git/data/2013_TEAK_1_326000_4103000_pit_free_CHM.tif')
print('TEAK CHM Array\n:',chm_array)
# print(chm_metadata)
#print metadata in alphabetical order
for item in sorted(chm_metadata):
    print(item + ':', chm_metadata[item])
# print(chm_metadata['extent'])
# mask out zero-height pixels so min/max reflect actual canopy
import copy
chm_nonzero_array = copy.copy(chm_array)
chm_nonzero_array[chm_array==0]=np.nan
print('TEAK CHM nonzero array:\n',chm_nonzero_array)
print(np.nanmin(chm_nonzero_array))
print(np.nanmax(chm_nonzero_array))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We also need to import the following functions created in previous lessons
Step2: Calculate Hillshade
Step3: Now that we have a function to generate hillshade, we need to read in the NEON LiDAR Digital Terrain Model (DTM) geotif using the raster2array function and then calculate hillshade using the hillshade function. We can then plot both using the plot_band_array function.
Step4: Calculate CHM & Overlay on Top of Hillshade
Step5: Links to Tutorials on Creating Hillshades
|
7,138
|
<ASSISTANT_TASK:>
Python Code:
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai.io import *
from fastai.conv_learner import *
from fastai.column_data import *
# PATH = Path('data/nietzsche/')
PATH = 'data/nietzsche/'
get_data("https://s3.amazonaws.com/text-datasets/nietzsche.txt", f'{PATH}nietzsche.txt')
text = open(f'{PATH}nietzsche.txt').read()
print('corpus length:', len(text))
text[:400]
chars = sorted(list(set(text)))
vocab_size = len(chars) + 1
print('total chars', vocab_size)
chars.insert(0, '\0')
''.join(chars[1:-5])
char_indices = {c: i for i, c in enumerate(chars)}
indices_char = {i: c for i, c in enumerate(chars)}
idx = [char_indices[c] for c in text]
idx[:10]
''.join(indices_char[i] for i in idx[:70])
cs = 3
c1_dat = [idx[i] for i in range(0, len(idx)-cs, cs)] # every 1st char
c2_dat = [idx[i+1] for i in range(0, len(idx)-cs, cs)] # every 2nd
c3_dat = [idx[i+2] for i in range(0, len(idx)-cs, cs)] # every 3rd
c4_dat = [idx[i+3] for i in range(0, len(idx)-cs, cs)] # every 4th
x1 = np.stack(c1_dat)
x2 = np.stack(c2_dat)
x3 = np.stack(c3_dat)
y = np.stack(c4_dat)
x1[:4], x2[:4], x3[:4]
y[:4]
x1.shape, y.shape
n_hidden = 256
n_fac = 42 # about half the number of our characters
'0.3' in torch.__version__
class Char3Model(nn.Module):
    """Predict the 4th character from embeddings of the previous three.

    Each character goes through a shared embedding and input layer (the
    'green arrow' in the lesson diagram), the hidden state is advanced by a
    shared hidden-to-hidden layer (the 'orange arrow'), and the final hidden
    state is projected to character logits (the 'blue arrow').
    """

    def __init__(self, vocab_size, n_fac):
        super().__init__()
        self.e = nn.Embedding(vocab_size, n_fac)  # embedding
        # the 'green arrow' from our diagram – the layer operation from input to hidden
        self.l_in = nn.Linear(n_fac, n_hidden)
        # the 'orange arrow' from our diagram – the layer operation from hidden to hidden
        self.l_hidden = nn.Linear(n_hidden, n_hidden)
        # the 'blue arrow' from our diagram – the layer operation from hidden to output
        self.l_out = nn.Linear(n_hidden, vocab_size)

    def forward(self, c1, c2, c3):
        in1 = F.relu(self.l_in(self.e(c1)))
        in2 = F.relu(self.l_in(self.e(c2)))
        in3 = F.relu(self.l_in(self.e(c3)))

        # BUG FIX: `'0.3' in torch.__version__` also matched e.g. '1.0.3';
        # startswith() tests only the leading major/minor version.
        if torch.__version__.startswith('0.3'):
            h = V(torch.zeros(in1.size()).cuda())
            h = F.tanh(self.l_hidden(h+in1))
            h = F.tanh(self.l_hidden(h+in2))
            h = F.tanh(self.l_hidden(h+in3))
        else:
            # pytorch 0.4+ no longer needs a Variable wrapper
            h = torch.zeros(in1.size()).cuda()
            h = torch.tanh(self.l_hidden(h + in1))
            h = torch.tanh(self.l_hidden(h + in2))
            h = torch.tanh(self.l_hidden(h + in3))

        # dim stated explicitly: the old implicit log_softmax default is deprecated
        return F.log_softmax(self.l_out(h), dim=-1)
mdata = ColumnarModelData.from_arrays('.', [-1], np.stack([x1,x2,x3], axis=1), y, bs=512)
model = Char3Model(vocab_size, n_fac).cuda()
it = iter(mdata.trn_dl)
*xs,yt = next(it)
# tensor = model(*xs)
tensor = model(*V(xs))
optimizer = optim.Adam(model.parameters(), 1e-2)
set_lrs(optimizer, 1e-3)
fit(model, mdata, 1, optimizer, F.nll_loss)
set_lrs(optimizer, 1e-3)
fit(model, mdata, 1, optimizer, F.nll_loss)
def get_next(inp):
    """Predict the character that follows a 3-char string.

    Takes a 3-char string.
    Turns it into a Tensor of an array of the char index of the string.
    Passes that tensor to the model.
    Does an argmax to get the predicted char-number; then converts to char.
    """
    idxs = T(np.array([char_indices[c] for c in inp]))
    # pred = model(*idxs)
    pred = model(*VV(idxs))
    i = np.argmax(to_np(pred))
    return chars[i]

# spot-check the trained model on a few 3-character contexts
get_next('y. '), get_next('ppl'), get_next(' th'), get_next('and')
cs = 8
c_in_dat = [[idx[i + j] for i in range(cs)] for j in range(len(idx) - cs)]
c_out_dat = [idx[j + cs] for j in range(len(idx) - cs)]
xs = np.stack(c_in_dat, axis=0); xs.shape
y = np.stack(c_out_dat); y.shape
xs[:cs, :cs]
y[:cs]
val_idx = get_cv_idxs(len(idx) - cs - 1)
mdata = ColumnarModelData.from_arrays('.', val_idx, xs, y, bs=512)
class CharLoopModel(nn.Module):
    """This is an RNN written out by hand: the same input and hidden layers
    are applied in a loop over the characters, with the hidden state *added*
    to each embedded input."""
    def __init__(self, vocab_size, n_fac):
        super().__init__()
        self.e = nn.Embedding(vocab_size, n_fac)
        self.l_in = nn.Linear(n_fac, n_hidden)         # input -> hidden
        self.l_hidden = nn.Linear(n_hidden, n_hidden)  # hidden -> hidden
        self.l_out = nn.Linear(n_hidden, vocab_size)   # hidden -> output logits

    def forward(self, *cs):
        bs = cs[0].size(0)  # batch size of the current mini-batch
        # h = torch.zeros(bs, n_hidden).cuda()
        h = V(torch.zeros(bs, n_hidden).cuda())  # initial hidden state: zeros
        for c in cs:
            # inp = torch.tanh(self.l_in(self.e(c))) # the torch.tanh vs F.tanh warning didnt pop
            # h = torch.tanh(self.l_hidden(h + inp)) # up on Mac, but did on Linux-gpu. Odd.
            inp = F.relu(self.l_in(self.e(c)))
            h = F.tanh(self.l_hidden(h+inp))
        # only the final hidden state is projected to character logits
        return F.log_softmax(self.l_out(h), dim=-1)
model = CharLoopModel(vocab_size, n_fac).cuda()
optimizer = optim.Adam(model.parameters(), 1e-2)
fit(model, mdata, 1, optimizer, F.nll_loss)
set_lrs(optimizer, 1e-3)
fit(model, mdata, 1, optimizer, F.nll_loss)
class CharLoopConcatModel(nn.Module):
    """Hand-rolled RNN that *concatenates* the hidden state with each embedded
    input (instead of adding them), so the merge loses no information."""

    def __init__(self, vocab_size, n_fac):
        super().__init__()
        self.e = nn.Embedding(vocab_size, n_fac)
        # input layer sees [hidden ; embedding] concatenated along dim 1
        self.l_in = nn.Linear(n_fac + n_hidden, n_hidden)
        self.l_hidden = nn.Linear(n_hidden, n_hidden)
        self.l_out = nn.Linear(n_hidden, vocab_size)

    def forward(self, *cs):
        bs = cs[0].size(0)
        h = V(torch.zeros(bs, n_hidden).cuda())
        for c in cs:
            inp = torch.cat((h, self.e(c)), 1)
            inp = F.relu(self.l_in(inp))
            h = F.tanh(self.l_hidden(inp))
        # BUG FIX: project the final *hidden* state, not the pre-hidden
        # activation `inp` -- otherwise self.l_hidden never feeds the output
        # (matches the CharLoopModel above, which returns l_out(h)).
        return F.log_softmax(self.l_out(h), dim=-1)
model = CharLoopConcatModel(vocab_size, n_fac).cuda()
optimizer = optim.Adam(model.parameters(), 1e-3)
it = iter(mdata.trn_dl)
*xs,yt = next(it)
# t = model(*xs)
t = model(*V(xs))
xs[0].size(0)
t
fit(model, mdata, 1, optimizer, F.nll_loss)
set_lrs(optimizer, 1e-4)
fit(model, mdata, 1, optimizer, F.nll_loss)
# Version-specific helper: pytorch 0.3 passes one wrapped Variable of all
# indices at once; later versions pass one tensor per character.
if '0.3' in torch.__version__:
    def get_next(inp):
        idxs = T(np.array([char_indices[c] for c in inp]))
        p = model(*VV(idxs))
        i = np.argmax(to_np(p))
        return chars[i]
else:
    def get_next(inp):
        # idxs = [T(np.array([char_indices[c] for c in inp]))]
        idxs = [T(np.array([char_indices[c]])) for c in inp]
        p = model(*idxs)
        i = np.argmax(to_np(p))
        # pdb.set_trace()
        return chars[i]

# sample predictions from the 8-character model
get_next('for thos')
get_next('part of ')
get_next('queens a')
class CharRNN(nn.Module):
    """Same char model as the hand-rolled loops, using PyTorch's built-in nn.RNN."""

    def __init__(self, vocab_size, n_fac):
        super().__init__()
        self.e = nn.Embedding(vocab_size, n_fac)
        self.rnn = nn.RNN(n_fac, n_hidden)
        self.l_out = nn.Linear(n_hidden, vocab_size)

    def forward(self, *cs):
        bs = cs[0].size(0)
        # initial hidden state: (num_layers, bs, n_hidden)
        h = V(torch.zeros(1, bs, n_hidden))
        inp = self.e(torch.stack(cs))  # (seq_len, bs, n_fac)
        outp, h = self.rnn(inp, h)
        # outp[-1] is the last time step's hidden output; dim is given
        # explicitly because the implicit log_softmax default is deprecated
        # (this is the fix the original commented-out line already suggested)
        return F.log_softmax(self.l_out(outp[-1]), dim=-1)
optimizer = optim.Adam(model.parameters(), 1e-3)
it = iter(mdata.trn_dl)
*xs,yt = next(it)
# tensor = model.e(V(torch.stack(xs))) # works w/o V(.). but takes longer when switching btwn w/wo V(.)?
# tensor = model.e(torch.stack(xs)) # these are ints so cannot require gradients
# tensor = model.e(T(torch.stack(xs)))
tensor = model.e(V(torch.stack(xs)))
tensor.size()
# htensor = V(torch.zeros(1, 512, n_hidden)) # V(.) required here, else: RuntimeError: CuDNN error: CUDNN_STATUS_EXECUTION_FAILED
# NOTE: does not work: htensor = torch.zeros(1, 512, n_hidden, requires_grad=True) # requires_grad=True accomplishes what V(.) did in 0.3.1 for 0.4.
# htensor = T(torch.zeros(1, 512, n_hidden))
htensor = V(torch.zeros(1, 512, n_hidden))
outp, hn = model.rnn(tensor, htensor)
outp.size(), hn.size()
# the error when using pytorch 0.4:
tensor = model(*V(xs)); tensor.size()
tensor = model(*V(xs)); tensor.size()
fit(model, mdata, 4, optimizer, F.nll_loss)
set_lrs(opt, 1e-4)
fit(model, mdata, 2, optimizer, F.nll_loss)
def get_next(inp):
idxs = T(np.array([char_indices[c] for c in inp]))
p = model(*VV(idxs))
i = np.argmax(to_np(p))
return chars[i]
get_next('for thos')
def get_next_n(inp, n):
    """Generate n characters, feeding each prediction back into the model's
    context window (drop the oldest character, append the newest)."""
    res = inp
    for _ in range(n):
        nxt = get_next(inp)
        res = res + nxt
        inp = inp[1:] + nxt
    return res
get_next_n('for thos', 40)
c_in_dat = [[idx[i+j] for i in range(cs)] for j in range(0, len(idx) - cs - 1, cs)]
c_out_dat = [[idx[i+j] for i in range(cs)] for j in range(1, len(idx) - cs, cs)]
xs = np.stack(c_in_dat)
xs.shape
ys = np.stack(c_out_dat)
ys.shape
xs[:cs, :cs]
ys[:cs, :cs]
val_idx = get_cv_idxs(len(xs) - cs - 1)
mdata = ColumnarModelData.from_arrays('.', val_idx, xs, ys, bs=512)
class CharSeqRNN(nn.Module):
    """nn.RNN model that returns a prediction for *every* time step
    (multi-output), not just the last one."""
    def __init__(self, vocab_size, n_fac):
        super().__init__()
        self.e = nn.Embedding(vocab_size, n_fac)
        self.rnn = nn.RNN(n_fac, n_hidden)
        self.l_out = nn.Linear(n_hidden, vocab_size)

    def forward(self, *cs):
        bs = cs[0].size(0)
        h = V(torch.zeros(1, bs, n_hidden))   # fresh hidden state each batch
        inp = self.e(torch.stack(cs))         # (seq_len, bs, n_fac)
        outp,h = self.rnn(inp, h)
        # full outp (seq_len, bs, n_hidden) -> per-step character logits
        return F.log_softmax(self.l_out(outp), dim=-1)
model = CharSeqRNN(vocab_size, n_fac).cuda()
optimizer = optim.Adam(model.parameters(), 1e-3)
it = iter(mdata.trn_dl)
*xst, yt = next(it)
def nll_loss_seq(inp, targ):
    """NLL loss for a whole sequence of predictions.

    inp: (seq_len, batch, n_out) log-probabilities; targ: (batch, seq_len)
    target indices.  Both are flattened to (seq_len*batch, ...) in matching
    order before handing off to F.nll_loss.
    """
    seq_len, batch, n_out = inp.size()
    flat_targ = targ.transpose(0, 1).contiguous().view(-1)
    flat_inp = inp.view(-1, n_out)
    return F.nll_loss(flat_inp, flat_targ)
fit(model, mdata, 4, optimizer, nll_loss_seq)
set_lrs(opt, 1e-4)
fit(model, mdata, 1, optimizer, nll_loss_seq)
model = CharSeqRNN(vocab_size, n_fac).cuda()
optimizer = optim.Adam(model.parameters(), 1e-2)
m.rnn.weight_hh_l0.data.copy_(torch.eye(n_hidden))
fit(model, mdata, 4, optimizer, nll_loss_seq)
set_lrs(optimizer, 1e-3)
fit(model, mdata, 4, opt, nll_loss_seq)
set_lrs(optimizer, 1e-4)
fit(model, mdata, 4, optimizer, nll_loss_seq)
from torchtext import vocab, data
from fastai.nlp import *
from fastai.lm_rnn import *
PATH = 'data/nietzsche/'
TRN_PATH = 'trn/'
VAL_PATH = 'val/'
TRN = f'{PATH}{TRN_PATH}'
VAL = f'{PATH}{VAL_PATH}'
## line counting: https://stackoverflow.com/a/3137099
# $ wc -l nietzsche/nietzsche.txt
## splitting: https://stackoverflow.com/a/2016918
# $ split -l 7947 nietzsche/nietzsche.txt
# $ mv xaa nietzsche/trn.txt
# $ mv xab nietzsche/val.txt
%ls {PATH}
%ls {PATH}trn
TEXT = data.Field(lower=True, tokenize=list) # torchtext
bs = 64; bptt = 8; n_fac = 42; n_hidden = 256
FILES = dict(train=TRN_PATH, validation=VAL_PATH, test=VAL_PATH)
mdata = LanguageModelData.from_text_files(PATH, TEXT, **FILES, bs=bs, bptt=bptt, min_freq=3)
len(mdata.trn_dl), mdata.nt, len(mdata.trn_ds), len(mdata.trn_ds[0].text)
class CharSeqStatefulRNN(nn.Module):
    """Multi-output RNN that keeps its hidden state between mini-batches
    (stateful), so context carries across bptt-sized chunks."""
    def __init__(self, vocab_size, n_fac, bs):
        self.vocab_size = vocab_size
        super().__init__()
        self.e = nn.Embedding(vocab_size, n_fac)
        self.rnn = nn.RNN(n_fac, n_hidden)
        self.l_out = nn.Linear(n_hidden, vocab_size)
        self.init_hidden(bs)

    def forward(self, cs):
        bs = cs[0].size(0)
        # the last (short) batch of an epoch can have a different size
        if self.h.size(1) != bs: self.init_hidden(bs)
        outp,h = self.rnn(self.e(cs), self.h)
        self.h = repackage_var(h) # bptt here; throw away hidden state's history
        return F.log_softmax(self.l_out(outp), dim=-1).view(-1, self.vocab_size)

    def init_hidden(self, bs): self.h = V(torch.zeros(1, bs, n_hidden))
m = CharSeqStatefulRNN(mdata.nt, n_fac, 512).cuda()
opt = optim.Adam(m.parameters(), 1e-3)
fit(m, mdata, 4, opt, F.nll_loss)
set_lrs(opt, 1e-4)
fit(m, mdata, 4, opt, F.nll_loss)
# # From pytorch source:
# def RNNCell(input, hidden, w_ih, w_hh, b_ih, b_hh):
# return F.tanh(F.linear(input, w_ih, b_ih) + F.linear(hidden, w_hh, b_hh))
class CharSeqStatefulRNN2(nn.Module):
    """Same stateful model, but with the recurrence written out via
    nn.RNNCell to show what nn.RNN does internally."""
    def __init__(self, vocab_size, n_fac, bs):
        super().__init__()
        self.vocab_size = vocab_size
        self.e = nn.Embedding(vocab_size, n_fac)
        self.rnn = nn.RNNCell(n_fac, n_hidden)
        self.l_out = nn.Linear(n_hidden, vocab_size)
        self.init_hidden(bs)

    def forward(self, cs):
        bs = cs[0].size(0)
        if self.h.size(1) != bs: self.init_hidden(bs)
        outp = []
        o = self.h
        for c in cs:
            o = self.rnn(self.e(c), o)  # one RNNCell step per character
            outp.append(o)
        outp = self.l_out(torch.stack(outp))
        self.h = repackage_var(o)  # detach history (truncated bptt)
        return F.log_softmax(outp, dim=-1).view(-1, self.vocab_size)

    # NOTE(review): nn.RNNCell documents a (bs, n_hidden) hidden state; the
    # leading 1 here appears to rely on broadcasting -- confirm.
    def init_hidden(self, bs): self.h = V(torch.zeros(1, bs, n_hidden))
m = CharSeqStatefulRNN2(mdata.nt, n_fac, 512).cuda()
opt = optim.Adam(m.parameters(), 1e-3)
fit(m, mdata, 4, opt, F.nll_loss)
class CharSeqStatefulGRU(nn.Module):
    """Stateful multi-output model with a GRU cell in place of the vanilla RNN."""
    def __init__(self, vocab_size, n_fac, bs):
        super().__init__()
        self.vocab_size = vocab_size
        self.e = nn.Embedding(vocab_size, n_fac)
        self.rnn = nn.GRU(n_fac, n_hidden)
        self.l_out = nn.Linear(n_hidden, vocab_size)
        self.init_hidden(bs)

    def forward(self, cs):
        bs = cs[0].size(0)
        # re-create the hidden state when the batch size changes (last batch)
        if self.h.size(1) != bs: self.init_hidden(bs)
        outp,h = self.rnn(self.e(cs), self.h)
        self.h = repackage_var(h)  # detach history (truncated bptt)
        return F.log_softmax(self.l_out(outp), dim=-1).view(-1, self.vocab_size)

    def init_hidden(self, bs): self.h = V(torch.zeros(1, bs, n_hidden))
# # From pytorch source code – for reference
# def GRUCell(input, hidden, w_ih, w_hh, b_ih, b_hh):
# gi = F.linear(input, w_ih, b_ih)
# gh = F.linear(hidden, w_hh, b_hh)
# i_r, i_i, i_n = gi.chunk(3, 1)
# h_r, h_i, h_n = gh.chunk(3, 1)
# resetgate = F.sigmoid(i_r + h_r)
# inputgate = F.sigmoid(i_i + h_i)
# newgate = F.tanh(i_h + resetgate * h_n)
# return newgate + inputgate * (hidden - newgate)
m = CharSeqStatefulGRU(mdata.nt, n_fac, 512).cuda()
opt = optim.Adam(m.parameters(), 1e-3)
fit(m, mdata, 6, opt, F.nll_loss)
set_lrs(opt, 1e-4)
fit(m, mdata, 3, opt, F.nll_loss)
from fastai import sgdr
n_hidden = 512
class CharSeqStatefulLSTM(nn.Module):
    """Stateful multi-output LSTM: nl stacked layers with dropout; the hidden
    state is a (h, c) tuple, so both tensors are initialized and detached."""
    def __init__(self, vocab_size, n_fac, bs, nl):
        super().__init__()
        self.vocab_size,self.nl = vocab_size,nl
        self.e = nn.Embedding(vocab_size, n_fac)
        self.rnn = nn.LSTM(n_fac, n_hidden, nl, dropout=0.5)
        self.l_out = nn.Linear(n_hidden, vocab_size)
        self.init_hidden(bs)

    def forward(self, cs):
        bs = cs[0].size(0)
        # compare against h (first element of the tuple) for batch-size changes
        if self.h[0].size(1) != bs: self.init_hidden(bs)
        outp,h = self.rnn(self.e(cs), self.h)
        self.h = repackage_var(h)  # detach both h and c (truncated bptt)
        return F.log_softmax(self.l_out(outp), dim=-1).view(-1, self.vocab_size)

    def init_hidden(self, bs):
        # LSTM hidden state is a (hidden, cell) pair, one per layer
        self.h = (V(torch.zeros(self.nl, bs, n_hidden)),
                  V(torch.zeros(self.nl, bs, n_hidden)))
m = CharSeqStatefulLSTM(mdata.nt, n_fac, 512, 2).cuda()
lo = LayerOptimizer(optim.Adam, m, 1e-2, 1e-5)
os.makedirs(f'{PATH}models', exist_ok=True)
fit(m, mdata, 2, lo.opt, F.nll_loss)
on_end = lambda sched, cycle: save_model(m, f'{PATH}models/cyc_{cycle}')
cb = [CosAnneal(lo, len(mdata.trn_dl), cycle_mult=2, on_cycle_end=on_end)]
fit(m, mdata, 2**4-1, lo.opt, F.nll_loss, callbacks=cb)
on_end = lambda sched, cycle: save_model(m, f'{PATH}models/cyc_{cycle}')
cb = [CosAnneal(lo, len(mdata.trn_dl), cycle_mult=2, on_cycle_end=on_end)]
fit(m, mdata, 2**6-1, lo.opt, F.nll_loss, callbacks=cb)
def get_next(inp):
    """Sample (rather than argmax) the next character using the torchtext vocab."""
    idxs = TEXT.numericalize(inp)
    p = m(VV(idxs.transpose(0,1)))
    # multinomial sampling from the predicted distribution adds variety
    r = torch.multinomial(p[-1].exp(), 1)
    return TEXT.vocab.itos[to_np(r)[0]]

get_next('for thos')

def get_next_n(inp, n):
    """Generate n characters by feeding each prediction back into the context."""
    res = inp
    for i in range(n):
        c = get_next(inp)
        res += c
        inp = inp[1:] + c
    return res

# generate two 400-character samples from different seeds
print(get_next_n('for thos', 400))
print(get_next_n('the reason', 400))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Setup
Step2: Sometimes it's useful to have a zero value in the dataset, eg
Step3: Map from chars to indices and back again
Step4: idx will be the data we use form now on – it simply converts all characters to their index (based on the mapping above).
Step5: 2. Three char model
Step6: Our inputs
Step7: Our outputs
Step8: The first 4 inputs and outputs
Step9: 2.2 Create and train model
Step10: The number of latent factors to create (ie
Step12: 2.3 Test model
Step13: 3. Our first RNN
Step14: For each of 0 thru 8, create a list of every 8th character with that starting point. These will be the 8 inputs to our model.
Step15: So each column below is one series of 8 characters from the text.
Step16: they're overlapping. So after '[42, 29, 30, 25, 27, 29, 1, 1]' comes '1', and after '[29, 30, 25, 27, 29, 1, 1, 1]' comes '43', and so on. The nth row is the same as the nth column.
Step18: 3.2 Create and train model
Step19: The input and hidden states represent qualitatively different types of information, so adding them together can potentially lose information. Instead we can concatenate them together.
Step20: 3.3 Test Model
Step21: 4. RNN with PyTorch
Step22: I'm able to get this far in pytorch 0.4, using T instead of V. The problem is the next line keeps giving me a
Step23: 4.1 Test model
Step24: 5. Multi-output model
Step25: Then create the exact same thing, offset by 1, as our labels.
Step26: 5.2 Create and train model
Step27: 5.3 Identity init
Step28: 6. Stateful model
Step29: 6.2 RNN
Step30: 6.3 RNN loop
Step31: 6.4 GRU
Step32: 6.5 Putting it all together LSTM
Step33: 6.6 Test
|
7,139
|
<ASSISTANT_TASK:>
Python Code:
# Notebook-only setup (shell/IPython magics): pin PHOEBE 2.2.x and enable inline plots.
!pip install -I "phoebe>=2.2,<2.3"
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt

logger = phoebe.logger()
b = phoebe.default_binary()

# two one-day windows, widely separated in time, to expose vgamma/ltte effects
times1 = np.linspace(0,1,201)
times2 = np.linspace(90,91,201)

# light curves, radial velocities, orbits, and meshes over both windows
b.add_dataset('lc', times=times1, dataset='lc1')
b.add_dataset('lc', times=times2, dataset='lc2')
b.add_dataset('rv', times=times1, dataset='rv1')
b.add_dataset('rv', times=times2, dataset='rv2')
b.add_dataset('orb', times=times1, dataset='orb1')
b.add_dataset('orb', times=times2, dataset='orb2')
b.add_dataset('mesh', times=[0], dataset='mesh1', columns=['vws'])
b.add_dataset('mesh', times=[90], dataset='mesh2', columns=['vws'])

# inspect the relevant system/compute parameters
b['vgamma@system']
b['t0@system']
b['ltte@compute']

# baseline: no systemic velocity, no light-travel-time effect
b.run_compute(irrad_method='none', model='0_false')
# systemic velocity only
b['vgamma@system'] = 100
b.run_compute(irrad_method='none', model='100_false')
# systemic velocity plus light-travel-time effect
b.run_compute(irrad_method='none', ltte=True, model='100_true')

colors = {'0_false': 'b', '100_false': 'r', '100_true': 'g'}
# light curves for all three models, one panel per time window
afig, mplfig = b['lc'].plot(c=colors, linestyle='solid',
                            axorder={'lc1': 0, 'lc2': 1},
                            subplot_grid=(1,2), tight_layout=True, show=True)
# radial velocities
afig, mplfig = b['rv'].plot(c=colors, linestyle='solid',
                            axorder={'rv1': 0, 'rv2': 1},
                            subplot_grid=(1,2), tight_layout=True, show=True)
# orbital positions and line-of-sight velocities: vgamma effect
afig, mplfig = b.filter(kind='orb', model=['0_false', '100_false']).plot(x='us', y='ws',
                            c=colors, linestyle='solid',
                            axorder={'orb1': 0, 'orb2': 1},
                            subplot_grid=(1,2), tight_layout=True, show=True)
afig, mplfig = b.filter(kind='orb', model=['0_false', '100_false']).plot(x='times', y='vws',
                            c=colors, linestyle='solid',
                            axorder={'orb1': 0, 'orb2': 1},
                            subplot_grid=(1,2), tight_layout=True, show=True)
# same comparisons for the ltte effect
afig, mplfig = b.filter(kind='orb', model=['100_false', '100_true']).plot(x='us', y='ws',
                            c=colors, linestyle='solid',
                            axorder={'orb1': 0, 'orb2': 1},
                            subplot_grid=(1,2), tight_layout=True, show=True)
afig, mplfig = b.filter(kind='orb', model=['100_false', '100_true']).plot(x='times', y='vws',
                            c=colors, linestyle='solid',
                            axorder={'orb1': 0, 'orb2': 1},
                            subplot_grid=(1,2), tight_layout=True, show=True)
afig, mplfig = b.filter(kind='mesh', model=['0_false', '100_false']).plot(x='us', y='ws',
axorder={'mesh1': 0, 'mesh2': 1},
subplot_grid=(1,2), tight_layout=True, show=True)
afig, mplfig = b.filter(kind='mesh', model=['100_false', '100_true']).plot(x='us', y='ws',
axorder={'mesh1': 0, 'mesh2': 1},
subplot_grid=(1,2), tight_layout=True, show=True)
b['primary@mesh1@0_false'].get_value('vws', time=0.0)[:5]
b['primary@mesh1@100_false'].get_value('vws', time=0.0)[:5]
b['primary@mesh1@100_true'].get_value('vws', time=0.0)[:5]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new Bundle. See Building a System for more details.
Step2: Now we'll create empty lc, rv, orb, and mesh datasets. We'll then look to see how the systemic velocity (vgamma) affects the observables in each of these datasets, and how those are also affected by light-time effects (ltte).
Step3: Changing Systemic Velocity and LTTE
Step4: We'll leave it set at 0.0 for now, and then change vgamma to see how that affects the observables.
Step5: The option to enable or disable LTTE are in the compute options, we can either set ltte or we can just temporarily pass a value when we call run_compute.
Step6: Let's first compute the model with 0 systemic velocity and ltte=False (not that it would matter in this case). Let's also name the model so we can keep track of what settings were used.
Step7: For our second model, we'll set a somewhat ridiculous value for the systemic velocity (so that the affects are exagerated and clearly visible over one orbit), but leave ltte off.
Step8: Lastly, let's leave this value of vgamma, but enable light-time effects.
Step9: Influence on Light Curves (fluxes)
Step10: In each of the figures below, the first panel will be the first cycle (days 0-3) and the second panel will be 100 cycles later (days 900-903).
Step11: Influence on Radial Velocities
Step12: Influence on Orbits (positions, velocities)
Step13: Plotting the w-velocities with respect to time would show the same as the RVs, except without any Rossiter-McLaughlin like effects. Note however the flip in w-convention between vw and radial velocities (+w is defined as towards the observer to make a right-handed system, but by convention +rv is a red shift).
Step14: Now let's look at the effect that enabling ltte has on these same plots.
Step15: Influence on Meshes
Step16: As you can see, since the center of mass of the system was at 0,0,0 at t0 - including systemic velocity actually shows the system spiraling towards or away from the observer (who is in the positive w direction). In other words - the positions of the meshes are affected in the same way as the orbits (note the offset on the ylimit scales).
|
7,140
|
<ASSISTANT_TASK:>
Python Code:
import ee
from IPython import display
import math
from matplotlib import pyplot
import numpy
from osgeo import gdal
import tempfile
import tensorflow as tf
import urllib
import zipfile
ee.Initialize()
input_image = ee.Image('LANDSAT/LT5_L1T_TOA_FMASK/LT50100551998003CPE00')
def print_image(image):
    """Render an Earth Engine image inline as a 360-pixel thumbnail."""
    thumb_params = {
        'image': image.serialize(),
        'dimensions': '360',
    }
    thumbnail = ee.data.getThumbnail(thumb_params)
    display.display(display.Image(thumbnail))
print_image(input_image.visualize(
bands=['B3', 'B2', 'B1'],
min=0,
max=0.3,
))
print_image(input_image.visualize(
bands=['fmask'],
min=0,
max=4,
palette=['808080', '0000C0', '404040', '00FFFF', 'FFFFFF'],
))
def download_tif(image, scale):
    """Download `image` from Earth Engine as a zipped single-file GeoTIFF
    and return the local filesystem path of the extracted 'data.tif'.

    NOTE(review): `urllib.urlretrieve` is the Python 2 location of this
    function (Python 3 moved it to `urllib.request.urlretrieve`) --
    confirm the target interpreter version.
    """
    # Request a single-file download ('filePerBand': 'false') at the
    # given scale in meters per pixel.
    url = ee.data.makeDownloadUrl(ee.data.getDownloadId({
        'image': image.serialize(),
        'scale': '%d' % scale,
        'filePerBand': 'false',
        'name': 'data',
    }))
    local_zip, headers = urllib.urlretrieve(url)
    # Extract into a fresh temporary directory and hand back the .tif path.
    with zipfile.ZipFile(local_zip) as local_zipfile:
        return local_zipfile.extract('data.tif', tempfile.mkdtemp())
def load_image(image, scale):
    """Download an EE image as a GeoTIFF and return it as an H x W x bands
    numpy array (bands stacked along the last axis)."""
    tif_path = download_tif(image, scale)
    raster = gdal.Open(tif_path, gdal.GA_ReadOnly)
    band_arrays = []
    for band_index in range(raster.RasterCount):
        # GDAL band indices are 1-based.
        band_arrays.append(raster.GetRasterBand(band_index + 1).ReadAsArray())
    return numpy.stack(band_arrays, 2)
mask = input_image.mask().reduce('min')
data = load_image(input_image.addBands(mask), scale=240)
data[:,:,7] = numpy.equal(data[:,:,7], 4)
pyplot.imshow(numpy.clip(data[:,:,[3,2,1]] * 3, 0, 1))
pyplot.show()
HOLDOUT_FRACTION = 0.1
# Reshape into a single vector of pixels.
data_vector = data.reshape([data.shape[0] * data.shape[1], data.shape[2]])
# Select only the valid data and shuffle it.
valid_data = data_vector[numpy.equal(data_vector[:,8], 1)]
numpy.random.shuffle(valid_data)
# Hold out a fraction of the labeled data for validation.
training_size = int(valid_data.shape[0] * (1 - HOLDOUT_FRACTION))
training_data = valid_data[0:training_size,:]
validation_data = valid_data[training_size:-1,:]
# Compute per-band means and standard deviations of the input bands.
data_mean = training_data[:,0:7].mean(0)
data_std = training_data[:,0:7].std(0)
valid_data.shape
def make_nn_layer(input, output_size):
    """Return a fully connected layer: input @ weights + biases.

    NOTE(review): the parameter name `input` shadows the Python builtin;
    kept as-is to preserve the existing call interface.
    """
    # Infer fan-in from the static shape of the incoming tensor.
    input_size = input.get_shape().as_list()[1]
    # Truncated-normal init with stddev ~ 1/sqrt(fan_in).
    weights = tf.Variable(tf.truncated_normal(
        [input_size, output_size],
        stddev=1.0 / math.sqrt(float(input_size))))
    biases = tf.Variable(tf.zeros([output_size]))
    return tf.matmul(input, weights) + biases
NUM_INPUT_BANDS = 7
NUM_HIDDEN_1 = 20
NUM_HIDDEN_2 = 20
NUM_CLASSES = 2
input = tf.placeholder(tf.float32, shape=[None, NUM_INPUT_BANDS])
labels = tf.placeholder(tf.float32, shape=[None])
normalized = (input - data_mean) / data_std
hidden1 = tf.nn.tanh(make_nn_layer(normalized, NUM_HIDDEN_1))
hidden2 = tf.nn.tanh(make_nn_layer(hidden1, NUM_HIDDEN_2))
logits = make_nn_layer(hidden2, NUM_CLASSES)
outputs = tf.argmax(logits, 1)
int_labels = tf.to_int64(labels)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, int_labels, name='xentropy')
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
correct_prediction = tf.equal(outputs, int_labels)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
BATCH_SIZE = 1000
NUM_BATCHES = 1000
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
validation_dict = {
input: validation_data[:,0:7],
labels: validation_data[:,7],
}
for i in range(NUM_BATCHES):
batch = training_data[numpy.random.choice(training_size, BATCH_SIZE, False),:]
train_step.run({input: batch[:,0:7], labels: batch[:,7]})
if i % 100 == 0 or i == NUM_BATCHES - 1:
print('Accuracy %.2f%% at step %d' % (accuracy.eval(validation_dict) * 100, i))
output_data = outputs.eval({input: data_vector[:,0:7]})
output_image = output_data.reshape([data.shape[0], data.shape[1]])
red = numpy.where(data[:,:,8], output_image, 0.5)
blue = numpy.where(data[:,:,8], data[:,:,7], 0.5)
green = numpy.minimum(red, blue)
comparison_image = numpy.dstack((red, green, blue))
pyplot.figure(figsize = (12,12))
pyplot.imshow(comparison_image)
pyplot.show()
pyplot.figure(figsize = (12,12))
pyplot.imshow(comparison_image[300:500,600:,:], interpolation='nearest')
pyplot.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initialize the Earth Engine client. This assumes that you have already configured Earth Engine credentials in this Datalab instance. If not, see the "Earth Engine Datalab Initialization.ipynb" notebook.
Step2: Inspect the Input Data
Step3: Let's define a helper function to make it easier to print thumbnails of Earth Engine images. (We'll be adding a library with utility functions like this one to the Earth Engine Python SDK, but for now we can do it by hand.)
Step4: Now we can use our helper function to quickly visualize the image and label data. The Fmask values are
Step5: Fetch the Input Data
Step6: Now we can use that function to load the data from Earth Engine, including a valid data band, as a numpy array. This may take a few seconds. We also convert the Fmask band to a binary cloud label (i.e. fmask=4).
Step7: Display the local data. This time, for variety, we display it as an NRG false-color image. We can use pyplot to display local numpy arrays.
Step8: Preprocess the Input Data
Step9: Build the TensorFlow Model
Step10: Here we define our TensorFlow model, a neural network with two hidden layers with tanh() nonlinearities. The main network has two outputs, continuous-valued “logits” representing non-cloud and cloud, respectively. The binary output is intepreted as the argmax of these outputs.
Step11: Train the Neural Network
Step12: Inspect the Results
Step13: We can zoom in on a particular region over on the right side of the image to see some of the disagreements. Red pixels represent comission errors and blue pixels represent omission errors relative to the labeled input data.
|
7,141
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import sygma
import omega
import stellab
#loading the observational data module STELLAB
stellab = stellab.stellab()
# OMEGA parameters for MW
mass_loading = 1 # How much mass is ejected from the galaxy per stellar mass formed
nb_1a_per_m = 3.0e-3 # Number of SNe Ia per stellar mass formed
sfe = 0.005 # Star formation efficiency, which sets the mass of gas
table = 'yield_tables/isotope_yield_table_MESA_only_ye.txt' # Yields for AGB and massive stars
#milky_way
o_mw = omega.omega(galaxy='milky_way',Z_trans=-1, table=table,sfe=sfe, DM_evolution=True,\
mass_loading=mass_loading, nb_1a_per_m=nb_1a_per_m, special_timesteps=60)
# Choose abundance ratios
%matplotlib nbagg
xaxis = '[Fe/H]'
yaxis = '[C/Fe]'
# Plot observational data points (Stellab)
stellab.plot_spectro(xaxis=xaxis, yaxis=yaxis,norm='Grevesse_Noels_1993',galaxy='milky_way',show_err=False)
# Extract the numerical predictions (OMEGA)
xy_f = o_mw.plot_spectro(fig=3,xaxis=xaxis,yaxis=yaxis,return_x_y=True)
# Overplot the numerical predictions (they are normalized according to Grevesse & Noels 1993)
plt.plot(xy_f[0],xy_f[1],linewidth=4,color='w')
plt.plot(xy_f[0],xy_f[1],linewidth=2,color='k',label='OMEGA')
# Update the existing legend
plt.legend(loc='center left', bbox_to_anchor=(1.01, 0.5), markerscale=0.8, fontsize=13)
# Choose X and Y limits
plt.xlim(-4.5,0.5)
plt.ylim(-1.4,1.6)
s0p0001=sygma.sygma(iniZ=0.0001)
s0p006=sygma.sygma(iniZ=0.006)
elem='[C/Fe]'
s0p0001.plot_spectro(fig=3,yaxis=elem,marker='D',color='b',label='Z=0.0001')
s0p006.plot_spectro(fig=3,yaxis=elem,label='Z=0.006')
# Plot the ejected mass of a certain element
elem='C'
s0p0001.plot_mass(fig=4,specie=elem,marker='D',color='b',label='Z=0.0001')
s0p006.plot_mass(fig=4,specie=elem,label='Z=0.006')
elem='C'
s0p0001.plot_mass_range_contributions(specie=elem,marker='D',color='b',label='Z=0.0001')
s0p006.plot_mass_range_contributions(specie=elem,label='Z=0.006')
s0p0001.plot_table_yield(fig=6,iniZ=0.0001,table='yield_tables/isotope_yield_table.txt',yaxis='C-12',
masses=[1.0, 1.65, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],marker='D',color='b',)
s0p006.plot_table_yield(fig=6,iniZ=0.006,table='yield_tables/isotope_yield_table.txt',yaxis='C-12',
masses=[1.0, 1.65, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simulation of the Milky Way
Step2: Comparison of chemical evolution prediction with observation
Step3: Tracing back to simple stellar populations.
Step4: What is [C/Fe] for two SSPs at Z=0.006 and Z=0.0001?
Step5: Now lets focus on C. What is the evolution of the total mass of C?
Step6: Which stars contribute the most to C?
Step7: Which stellar yields are the most?
|
7,142
|
<ASSISTANT_TASK:>
Python Code:
a = 5
b = a + 3.1415
c = a / b
print(a, b, c)
s = 'Ice cream' # A string
f = [1, 2, 3, 4] # A list
d = 3.1415928 # A floating point number
i = 5 # An integer
b = True # A boolean value
type(s)
isinstance(s, str) # is s a string?
isinstance(f, int) # is s an integer?
a < 99
a > 99
a == 5.
True == 'True'
foo = [1, 2, 3, 4, 5 ,6]
5 in foo
'this' in 'What is this?'
'that' in 'What is this?'
def display_and_capitalize_string(input_str):
    '''Print the given string, then return a capitalized copy of it.

    Takes in a string, prints that string, and then returns the same
    string but with it capitalized.
    '''
    print(input_str)
    capitalized = input_str.capitalize()
    return capitalized
display_and_capitalize_string('hi')
# input variable, x. Internal to the function itself, it is called
# input_str.
x = 'hi'
# function f(x) is `display_and_capitalize_string`
# the function returns the variable `output_string`
output_string = display_and_capitalize_string('hi')
out_string = display_and_capitalize_string('banana')
assert(out_string == 'Banana')
from nose.tools import assert_equal
assert_equal(out_string, "Banana")
assert(out_string[0].isupper())
assert(out_string=='BANANA')
x = 20
if x < 10:
print('x is less than 10')
else:
print('x is more than 10')
s1 = 'hello'
s2 = "world"
s3 = '''strings can
also go 'over'
multiple "lines".'''
s2
print(s3)
print( s1 + ' ' + s2) # note, we need the space otherwise we would get 'helloworld'
s3.upper()
s3.capitalize()
s3.split()
words = s3.split()
'_'.join(words) # Here, we are using a method directly on the string '_' itself.
foo = [1., 2., 3, 'four', 'five', [6., 7., 8], 'nine']
type(foo)
foo[0]
foo[5]
foo[5][1] # Python is sequential, we can access an element within an element using sequential indexing.
foo[-1] # This is the way to access the last element.
foo[-3] # ...and the third to last element
foo[-3][2] # we can also index strings.
# create a sequence of 10 elements, starting with zero, up to but not including 10.
bar = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
bar[2:5]
bar[:4]
bar[:]
bar[::2]
bar[5] = -99
bar
bar[2:7] = [1, 1, 1, 1, 1, 1, 1, 1]
bar
bar.insert(5, 'here')
bar
bar = [4, 5, 6, 7, 3, 6, 7, 3, 5, 7, 9]
bar.sort() # Note that we don't do 'bar = bar.sort()'. The sorting is done in place.
bar
foo = (3, 5, 7, 9)
# foo[2] = -999 # gives an assignment error. Commented so that all cells run.
a, b, c = 1, 2, 3 # Equivalent to '(a, b, c) = (1, 2, 3)'
print(b)
foobar = {'a':3, 'b':4, 'c':5}
foobar['b']
foobar['c'] = -99
foobar
foobar.keys()
foobar.values()
foobar['spam'] = 'eggs'
foobar
empty_dict = dict()
empty_list = list()
print(empty_dict, empty_list)
True and True, True and False
True or True, True or False
not True, not False
word = 'the'
sentence1 = 'the big brown dog'
sentence2 = 'I stand at the fridge'
sentence3 = 'go outside'
(word in sentence1) and (word in sentence2)
(word in sentence1) and (word in sentence2) and (word in sentence3)
(word in sentence1) or (word in sentence2) or (word in sentence3)
x = 20
5 < x < 30, 5 < x and x < 30
sum_of_squares = 0
for n in range(100): # range yields a sequence of numbers from 0 up to but not including 100
sum_of_squares += n**2 # the '+=' operator is equivalent to 'sum = sum + n**2',
# the '**' operator is a power, like '^' in other languages
print(sum_of_squares)
# THIS IS BETTER THAN THE NEXT CODE BLOCK. DO IT THIS WAY.
words = ['the', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog']
sentence = '' # this initializes a string which we can then add onto
for word in words:
sentence += word + ' '
sentence
# DON'T DO IT THIS WAY IF POSSIBLE, DO IT THE WAY IN THE PREVIOUS CODE BLOCK.
words = ['the', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog']
sentence = ''
for i in range(len(words)):
sentence += words[i] + ' '
sentence
for idx, word in enumerate(words):
print('The index is', idx, '...')
print('...and the word is', word)
[n**2 for n in range(10)]
random_list = [1, 2, 'three', 4.0, ['five',]]
[isinstance(item, str) for item in random_list]
random_list = [1, 2, 'three', 4.0, ['five',]]
foo = []
for item in random_list:
foo.append(isinstance(item, str))
foo
n = 5 # starting value
while n > 0:
n -= 1 # subtract 1 each loop
print(n) # look at value of n
# print all the numbers, except 5
for n in range(10):
if n == 5:
continue
print(n)
# print all the numbers up to (but not including) 5, then break out of the loop.
for n in range(10):
print('.')
if n == 5:
break
print(n)
print('done')
# pass can be used for empty functions or classes,
# or in loops (in which case it is usually a placeholder for future code)
# `pass` is a no-op placeholder: the function and class below are valid
# but intentionally empty (stubs for future code).
def foo(x):
    pass

class Foo(object):
    pass
x = 2
if x == 1:
pass # could just leave this part of the code out entirely...
elif x == 2:
print(x)
def addfive(x):
    """Return *x* increased by five."""
    result = x + 5
    return result
addfive(3.1415)
def sasos(a, b, c):
    '''return the sum of a, b, and c and the sum of the squares of a, b, and c'''
    values = (a, b, c)
    plain_sum = sum(values)
    square_sum = sum(v * v for v in values)
    return plain_sum, square_sum
s, ss = sasos(3, 4, 5)
print(s)
print(ss)
def powsum(x, y, z, a=1, b=2, c=3):
    """Return x**a + y**b + z**c (exponents default to 1, 2, 3)."""
    terms = (x ** a, y ** b, z ** c)
    return sum(terms)
print( powsum(2., 3., 4.) )
print( powsum(2., 3., 4., b=5) )
print( powsum(z=2., c=2, x=3., y=4.) )
def addfive(x):
    '''Return the argument plus five

    Input : x
        A number
    Output:
        The number x plus five
    '''
    return 5 + x
# now, try addfive?
addfive?
x = 5
def changex(x):  # the parameter x is local to this function
    """Add 10 to the local copy of x, report it, and return it.

    The caller's variable is unaffected (demonstrates local scope).
    """
    x = x + 10.
    print('Inside changex, x=', x)
    return x
res = changex(x) # supply the value of x in the 'global' scope.
print(res)
print(x) # The global x is unchanged
x = 5
def dostuffwithx(y):
    """Return y plus the module-level global x (x is read, not modified)."""
    return y + x
print(dostuffwithx(3.0))
print(x)
list(range(3, 6)) # normal call with separate arguments
args = [3, 6]
list(range(*args)) # call with arguments unpacked from a list
x = 5; y = 6; z = 7
powdict = {'a': 1, 'b': 2, 'c': 3}
print(powsum(x, y, z, a=1, b=2, c=3))
print(powsum(x, y, z, **powdict))
list(zip((1, 2, 3, 4, 5), ('a', 'b', 'c', 'd', 'e'), (6, 7, 8, 9, 10)))
pts = ((1, 2), (3, 4), (5, 6), (7, 8), (9, 10))
x, y = list(zip(*pts))
print(x)
print(y)
# and back again,
print(list(zip(*(x,y))))
from math import sqrt # more on importing external packages below
class Point(object):
    """A 2-D point supporting origin/pairwise distance and `+`."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def norm(self):
        'The distance of the point from the origin'
        return sqrt(self.x * self.x + self.y * self.y)

    def dist(self, other):
        'The distance to another point'
        # Works with any object exposing x and y attributes.
        return sqrt((self.x - other.x) ** 2 + (self.y - other.y) ** 2)

    def __add__(self, other):
        total_x = self.x + other.x
        total_y = self.y + other.y
        return Point(total_x, total_y)

    def __repr__(self):
        return 'Point(%f, %f)' % (self.x, self.y)
p1 = Point(3.3, 4.) # a point at location (3, 4)
p2 = Point(6., 8.) # another point, we can have as many as we want..
res = p1.norm()
print('p1.norm() = ', res)
res = p2.norm()
print('p2.norm() = ', res)
res = p1.dist(p2)
res2 = p2.dist(p1)
print('The distance between p1 and p2 is', res)
print('The distance between p2 and p1 is', res2)
p3 = p1+p2
p1
import math # This imports the math function. Here 'math' is like a subdirectory
# in your namespace that holds all of the math functions
math.e
e = 15.7
print(math.e, e)
import numpy as np
a = np.array([[1., 2., 3], [4., 5., 6.]])
a
np.sin(a)
math.sin(2.0) == np.sin(2.0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note, we did not need to declare variable types (like in fortran), we could just assign anything to a variable and it works. This is the power of an interpreted (as opposed to compiled) language. Also, we can add different types (a is an integer, and we add the float 3.1415 to get b). The result is 'upcast' to whatever data type can handle the result. I.e., adding a float and an int results in a float.
Step2: Side note
Step3: Exercise
Step4: C. Tests for equality and inequality
Step5: These statements have returned "booleans", which are True and False only. These are commonly used to check for conditions within a script or function to determine the next course of action.
Step6: There are other things that can be tested, not just mathematical equalities. For example, to test if an element is inside of a list or string (or any sequence, more on sequences below..), do
Step7: D. Intro to functions
Step8: This is analogous to the relationship between a variable and a function in math. The variable is $x$, and the function is $f(x)$, which changes the input $x$ in some way, then returns a new value. To access that returned value, you have to use the function -- not just define the function.
Step9: Exercise
Step10: We know that the assert statements passed because no error was thrown. On the other hand, the following test does not run successfully
Step11: Exercise
Step12: Exercise
Step13: You can also 'add' strings using 'operator overloading', meaning that the plus sign can take on different meanings depending on the data types of the variables you are using it on.
Step14: We can include special characters in strings. For example \n gives a newline, \t a tab, etc. Notice that the multiple line string above (s3) is converted to a single quote string with the newlines 'escaped' out with \n.
Step15: Strings are 'objects' in that they have 'methods'. Methods are functions that act on the particular instance of a string object. You can access the methods by putting a dot after the variable name and then the method name with parentheses (and any arguments to the method within the parentheses). Methods always have to have parentheses, even if they are empty.
Step16: One of the most useful string methods is 'split' that returns a list of the words in a string, with all of the whitespace (actual spaces, newlines, and tabs) removed. More on lists next.
Step17: Another common thing that is done with strings is the join method. It can be used to join a sequence of strings given a common conjunction
Step18: G. Containers
Step19: Note that lists (unlike arrays, as we will later learn) can be heterogeneous. That is, the elements in the list don't have to have the same kind of data type. Here we have a list with floats, ints, strings, and even another (nested) list!
Step20: We can get a sub-sequence from the list by giving a range of the data to extract. This is done by using the format
Step21: Exercise
Step22: This works for sequences as well,
Step23: Lists are also 'objects'; they also have 'methods'. Methods are functions that are designed to be applied to the data contained in the list. You can access them by putting a dot and the method name after the variable (called an 'object instance')
Step24: Exercise
Step25: Tuples are often used when a function has multiple outputs, or as a lightweight storage container. Becuase of this, you don't need to put the parentheses around them, and can assign multiple values at a time.
Step26: Dictionaries
Step27: Elements are referenced and assigned by keys
Step28: The keys and values can be extracted as lists using methods of the dictionary class.
Step29: New values can be assigned simply by assigning a value to a key that does not exist yet
Step30: Exercise
Step31: H. Logical Operators
Step32: Note that you can also use the word not to switch the meaning of a boolean
Step33: Now let's look at this with actual test examples instead of direct boolean values
Step34: I. Loops
Step35: You can iterate over any sequence, and in Python (like MATLAB) it is better to iterate over the sequence you want than to loop over the indices of that sequence. The following two examples give the same result, but the first is much more readable and easily understood than the second. Do the first whenever possible.
Step36: Sometimes you want to iterate over a sequence but you also want the indices of those elements. One way to do that is the enumerate function
Step37: List comprehension
Step38: The element can be any code snippet that depends on the item. This example gives a sequence of boolean values that determine if the element in a list is a string.
Step39: Exercise
Step40: Flow control
Step41: J. Functions
Step42: Function inputs and outputs
Step43: Functions can have variables with default values. You can also specify positional variables out of order if they are labeled explicitly.
Step44: Exercise
Step45: See PEP-257 for guidelines about writing good docstrings.
Step46: Variables from the 'global' scope can be used within a function, as long as those variables are unchanged. This technique should generally only be used when it is very clear what value the global variable has, for example, in very short helper functions.
Step47: Packing and unpacking function arguments
Step48: You can also unpack dictionaries as keyword arguments by placing ** in front of the dictionary, like
Step49: One common usage is using the builtin zip function to take a 'transpose' of a set of points.
Step50: K. Classes
Step51: Notice that we don't require other to be a Point class instance; it could be any object with x and y attributes. This is known as 'object composition' and is a useful approach for using multiple different kinds of objects with similar data in the same functions.
Step52: Exercise
Step53: The numpy package has the same math functions as the math package, but these functions are designed to work with numpy arrays. Arrays are the backbone of the numpy package. For now, just think of them as homogeneous, multidimensional lists.
Step54: Note that we can have two sin functions at the same time, one from the math package and one from the numpy package. This is one of the advantages of namespaces.
|
7,143
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from pymc3 import *
import numpy as np
import matplotlib.pyplot as plt
size = 200
true_intercept = 1
true_slope = 2
x = np.linspace(0, 1, size)
# y = a + b*x
true_regression_line = true_intercept + true_slope * x
# add noise
y = true_regression_line + np.random.normal(scale=.5, size=size)
data = dict(x=x, y=y)
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111, xlabel='x', ylabel='y', title='Generated data and underlying model')
ax.plot(x, y, 'x', label='sampled data')
ax.plot(x, true_regression_line, label='true regression line', lw=2.)
plt.legend(loc=0);
with Model() as model: # model specifications in PyMC3 are wrapped in a with-statement
# Define priors
sigma = HalfCauchy('sigma', beta=10, testval=1.)
intercept = Normal('Intercept', 0, sd=20)
x_coeff = Normal('x', 0, sd=20)
# Define likelihood
likelihood = Normal('y', mu=intercept + x_coeff * x,
sd=sigma, observed=y)
# Inference!
start = find_MAP() # Find starting value by optimization
step = NUTS(scaling=start) # Instantiate MCMC sampling algorithm
trace = sample(2000, step, start=start, progressbar=False) # draw 2000 posterior samples using NUTS sampling
with Model() as model:
# specify glm and pass in data. The resulting linear model, its likelihood and
# and all its parameters are automatically added to our model.
glm.glm('y ~ x', data)
start = find_MAP()
step = NUTS(scaling=start) # Instantiate MCMC sampling algorithm
trace = sample(2000, step, progressbar=False) # draw 2000 posterior samples using NUTS sampling
plt.figure(figsize=(7, 7))
traceplot(trace[100:])
plt.tight_layout();
plt.figure(figsize=(7, 7))
plt.plot(x, y, 'x', label='data')
glm.plot_posterior_predictive(trace, samples=100,
label='posterior predictive regression lines')
plt.plot(x, true_regression_line, label='true regression line', lw=3., c='y')
plt.title('Posterior predictive regression lines')
plt.legend(loc=0)
plt.xlabel('x')
plt.ylabel('y');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generating data
Step2: Estimating the model
Step3: This should be fairly readable for people who know probabilistic programming. However, would my non-statistician friend know what all this does? Moreover, recall that this is an extremely simple model that would be one line in R. Having multiple, potentially transformed regressors, interaction terms or link-functions would also make this much more complex and error prone.
Step4: Much shorter, but this code does the exact same thing as the above model specification (you can change priors and everything else too if we wanted). glm() parses the Patsy model string, adds random variables for each regressor (Intercept and slope x in this case), adds a likelihood (by default, a Normal is chosen), and all other variables (sigma). Finally, glm() then initializes the parameters to a good starting point by estimating a frequentist linear model using statsmodels.
Step5: The left side shows our marginal posterior -- for each parameter value on the x-axis we get a probability on the y-axis that tells us how likely that parameter value is.
|
7,144
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow-federated
!pip install --quiet --upgrade nest-asyncio
import nest_asyncio
nest_asyncio.apply()
import collections
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_federated as tff
import tensorflow_privacy as tfp
# Trivial federated computation used to sanity-check that the TFF
# runtime is working in this environment.
@tff.federated_computation
def hello_world():
    return 'Hello, World!'
hello_world()
def get_emnist_dataset():
    """Load Federated EMNIST (digits only) and return (train, test).

    Returns:
        emnist_train: per-client data with each client's examples mapped,
            shuffled, and batched (batch size 32).
        emnist_test: a single centralized dataset pooling all test clients,
            batched at 128.
    """
    emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data(
        only_digits=True)

    def element_fn(element):
        # Rename features and add a trailing channel dim to the pixels.
        return collections.OrderedDict(
            x=tf.expand_dims(element['pixels'], -1), y=element['label'])

    def preprocess_train_dataset(dataset):
        # Use buffer_size same as the maximum client dataset size,
        # 418 for Federated EMNIST
        return (dataset.map(element_fn)
                .shuffle(buffer_size=418)
                .repeat(1)
                .batch(32, drop_remainder=False))

    def preprocess_test_dataset(dataset):
        return dataset.map(element_fn).batch(128, drop_remainder=False)

    emnist_train = emnist_train.preprocess(preprocess_train_dataset)
    emnist_test = preprocess_test_dataset(
        emnist_test.create_tf_dataset_from_all_clients())
    return emnist_train, emnist_test
train_data, test_data = get_emnist_dataset()
def my_model_fn():
    """Build an uncompiled two-hidden-layer MLP wrapped as a TFF model."""
    model = tf.keras.models.Sequential([
        tf.keras.layers.Reshape(input_shape=(28, 28, 1), target_shape=(28 * 28,)),
        tf.keras.layers.Dense(200, activation=tf.nn.relu),
        tf.keras.layers.Dense(200, activation=tf.nn.relu),
        tf.keras.layers.Dense(10)])
    return tff.learning.from_keras_model(
        keras_model=model,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        input_spec=test_data.element_spec,
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

# Run five clients per thread. Increase this if your runtime is running out of
# memory. Decrease it if you have the resources and want to speed up execution.
tff.backends.native.set_local_python_execution_context(clients_per_thread=5)
total_clients = len(train_data.client_ids)
def train(rounds, noise_multiplier, clients_per_round, data_frame):
    """Train a DP federated-averaging model and log eval metrics.

    Appends one row of eval metrics to ``data_frame`` every fifth round and
    once more after the final round, and returns the extended frame.
    """
    # Using the `dp_aggregator` here turns on differential privacy with adaptive
    # clipping.
    aggregation_factory = tff.learning.model_update_aggregator.dp_aggregator(
        noise_multiplier, clients_per_round)
    # We use Poisson subsampling which gives slightly tighter privacy guarantees
    # compared to having a fixed number of clients per round. The actual number of
    # clients per round is stochastic with mean clients_per_round.
    sampling_prob = clients_per_round / total_clients
    # Build a federated averaging process.
    # Typically a non-adaptive server optimizer is used because the noise in the
    # updates can cause the second moment accumulators to become very large
    # prematurely.
    learning_process = tff.learning.build_federated_averaging_process(
        my_model_fn,
        client_optimizer_fn=lambda: tf.keras.optimizers.SGD(0.01),
        server_optimizer_fn=lambda: tf.keras.optimizers.SGD(1.0, momentum=0.9),
        model_update_aggregation_factory=aggregation_factory)
    eval_process = tff.learning.build_federated_evaluation(my_model_fn)

    # Training loop.
    state = learning_process.initialize()
    # FIX: loop variable renamed from `round`, which shadowed the builtin.
    for round_num in range(rounds):
        if round_num % 5 == 0:
            metrics = eval_process(state.model, [test_data])['eval']
            if round_num < 25 or round_num % 25 == 0:
                print(f'Round {round_num:3d}: {metrics}')
            # FIX: pd.concat replaces DataFrame.append, which is deprecated
            # and removed in pandas 2.0.
            row = pd.DataFrame([{'Round': round_num,
                                 'NoiseMultiplier': noise_multiplier,
                                 **metrics}])
            data_frame = pd.concat([data_frame, row], ignore_index=True)

        # Sample clients for a round. Note that if your dataset is large and
        # sampling_prob is small, it would be faster to use gap sampling.
        x = np.random.uniform(size=total_clients)
        sampled_clients = [
            train_data.client_ids[i] for i in range(total_clients)
            if x[i] < sampling_prob]
        sampled_train_data = [
            train_data.create_tf_dataset_for_client(client)
            for client in sampled_clients]

        # Use selected clients for update.
        state, metrics = learning_process.next(state, sampled_train_data)

    # Final evaluation after the last round.
    metrics = eval_process(state.model, [test_data])['eval']
    print(f'Round {rounds:3d}: {metrics}')
    row = pd.DataFrame([{'Round': rounds,
                         'NoiseMultiplier': noise_multiplier,
                         **metrics}])
    data_frame = pd.concat([data_frame, row], ignore_index=True)
    return data_frame
# Sweep noise multipliers at a fixed 50 expected clients/round to find the
# largest noise the model tolerates without degrading.
data_frame = pd.DataFrame()
rounds = 100
clients_per_round = 50
for noise_multiplier in [0.0, 0.5, 0.75, 1.0]:
    print(f'Starting training with noise multiplier: {noise_multiplier}')
    data_frame = train(rounds, noise_multiplier, clients_per_round, data_frame)
    print()
import matplotlib.pyplot as plt
import seaborn as sns
def make_plot(data_frame):
    """Plot eval accuracy (left) and loss (right) per round, one line per
    noise multiplier."""
    renamed = data_frame.rename(
        columns={'sparse_categorical_accuracy': 'Accuracy', 'loss': 'Loss'})
    plt.figure(figsize=(15, 5))
    plt.subplot(121)
    sns.lineplot(data=renamed, x='Round', y='Accuracy',
                 hue='NoiseMultiplier', palette='dark')
    plt.subplot(122)
    sns.lineplot(data=renamed, x='Round', y='Loss',
                 hue='NoiseMultiplier', palette='dark')

make_plot(data_frame)
# Orders at which the Renyi divergence is evaluated for RDP accounting.
rdp_orders = ([1.25, 1.5, 1.75, 2., 2.25, 2.5, 3., 3.5, 4., 4.5] +
              list(range(5, 64)) + [128, 256, 512])
total_clients = 3383
base_noise_multiplier = 0.5
base_clients_per_round = 50
target_delta = 1e-5
target_eps = 2

def get_epsilon(clients_per_round):
    """Return (clients_per_round, epsilon, scaled noise multiplier)."""
    # If we use this number of clients per round and proportionally
    # scale up the noise multiplier, what epsilon do we achieve?
    q = clients_per_round / total_clients
    noise_multiplier = base_noise_multiplier
    noise_multiplier *= clients_per_round / base_clients_per_round
    rdp = tfp.compute_rdp(
        q, noise_multiplier=noise_multiplier, steps=rounds, orders=rdp_orders)
    eps, _, _ = tfp.get_privacy_spent(rdp_orders, rdp, target_delta=target_delta)
    return clients_per_round, eps, noise_multiplier
def find_needed_clients_per_round():
    """Find the smallest clients_per_round (with proportionally scaled
    noise) whose epsilon drops below target_eps, via exponential growth
    followed by binary search."""
    hi = get_epsilon(base_clients_per_round)
    if hi[1] < target_eps:
        return hi

    # Grow interval exponentially until target_eps is exceeded.
    while True:
        lo = hi
        hi = get_epsilon(2 * lo[0])
        if hi[1] < target_eps:
            break

    # Binary search.
    while hi[0] - lo[0] > 1:
        mid = get_epsilon((lo[0] + hi[0]) // 2)
        if mid[1] > target_eps:
            lo = mid
        else:
            hi = mid
    return hi

clients_per_round, _, noise_multiplier = find_needed_clients_per_round()
print(f'To get ({target_eps}, {target_delta})-DP, use {clients_per_round} '
      f'clients with noise multiplier {noise_multiplier}.')
# Train the final releasable model with the privacy-calibrated hyperparameters.
rounds = 100
noise_multiplier = 1.2
clients_per_round = 120
data_frame = pd.DataFrame()
data_frame = train(rounds, noise_multiplier, clients_per_round, data_frame)
make_plot(data_frame)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <table class="tfo-notebook-buttons" align="left">
Step2: Some imports we will need for the tutorial. We will use tensorflow_federated, the open-source framework for machine learning and other computations on decentralized data, as well as tensorflow_privacy, the open-source library for implementing and analyzing differentially private algorithms in tensorflow.
Step3: Run the following "Hello World"
Step4: Download and preprocess the federated EMNIST dataset.
Step5: Define our model.
Step6: Determine the noise sensitivity of the model.
Step7: Now we can visualize the evaluation set accuracy and loss of those runs.
Step8: It appears that with 50 expected clients per round, this model can tolerate a noise multiplier of up to 0.5 without degrading model quality. A noise multiplier of 0.75 seems to cause a bit of model degradation, and 1.0 makes the model diverge.
Step9: Now we can train our final private model for release.
|
7,145
|
<ASSISTANT_TASK:>
Python Code:
# Launch a desktop browser runtime, then demo Python->JS translation.
from flexx.webruntime import launch
rt = launch('http://flexx.rtfd.org', 'xul', title='Test title')
from flexx.pyscript import py2js
print(py2js('square = lambda x: x**2'))
def foo(n):
    """Return the squares of 0..n-1 as a list."""
    squares = []
    i = 0
    while i < n:
        squares.append(i * i)
        i += 1
    return squares
print(py2js(foo))
# Comprehension-based variant of the same function, for comparing the
# generated JavaScript against the loop version above.
def foo(n):
    return [i**2 for i in range(n)]
print(py2js(foo))
from flexx import react

@react.input
def name(n='john doe'):
    # Input signal: validates and normalizes the value before storing it.
    if not isinstance(n, str):
        raise ValueError('Name must be a string')
    return n.capitalize()

name

@react.connect('name')
def greet(n):
    # Reacts whenever the `name` signal changes.
    print('hello %s' % n)

name("almar klein")

# A signal can depend on multiple upstream signals.
@react.connect('first_name', 'last_name')
def greet(first, last):
    print('hello %s %s!' % (first, last))
class Person(react.HasSignals):
    """Demo of dynamic signal connections (``father.*`` / ``children.*``)."""

    @react.input
    def father(f):
        # Input signal: must itself be a Person.
        assert isinstance(f, Person)
        return f

    @react.connect('father.last_name')
    def last_name(s):
        # Follows the father's last_name signal.
        return s

    @react.connect('children.*.name')
    def child_names(*names):
        # BUG FIX: join the tuple of child names (`names`); the original
        # joined the unrelated global `name` signal object.
        return ', '.join(names)
from flexx import app, react
app.init_notebook()

class Greeter(app.Pair):
    # Python-side input signal, mirrored to the JS side below.
    @react.input
    def name(s):
        return str(s)

    class JS:
        @react.connect('name')
        def _greet(name):
            # Runs in the browser as PyScript-translated JavaScript.
            alert('Hello %s!' % name)

greeter = Greeter()
greeter.name('John')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: flexx.pyscript
Step2: flexx.react
Step3: A signal can have multiple upstream signals.
Step4: Dynamism provides great flexibility
Step5: flexx.app
|
7,146
|
<ASSISTANT_TASK:>
Python Code:
# Mount Google Drive and install the receptive_field package from source.
from google.colab import drive
drive.mount('/content/gdrive')
! mkdir gdrive/MyDrive/rf_keras
%cd gdrive/MyDrive/rf_keras
! ls
! git clone https://github.com/google-research/receptive_field.git
! ls
%cd receptive_field/
! ls
! pip install .
! pip install tensorflow
import tensorflow.compat.v1 as tf
import receptive_field as rf

# Example given here: InceptionV3.
# Build the graph once so receptive-field parameters can be computed from it.
g = tf.Graph()
with g.as_default():
    tf.keras.backend.set_learning_phase(0)  # Disable BN learning.
    x = tf.keras.Input([None, None, 3], name='input_image')
    model = tf.keras.applications.InceptionV3(input_tensor=x)
graph_def = g.as_graph_def()

input_node = 'input_image'
output_node = 'conv2d_85/Conv2D'
(receptive_field_x, receptive_field_y, effective_stride_x,
 effective_stride_y, effective_padding_x, effective_padding_y) = (
    rf.compute_receptive_field_from_graph_def(graph_def, input_node,
                                              output_node))
print(receptive_field_x)
print(receptive_field_y)
print(effective_stride_x)
print(effective_stride_y)
print(effective_padding_x)
print(effective_padding_y)

# Bonus: inspect the graph nodes in topological compute order.
node_info, name_to_node = rf.get_compute_order(graph_def, input_node_name='input_image')
order_to_info = {}
for _, info in node_info.items():
    order_to_info[info.order] = info
print(len(order_to_info.keys()))
for i in range(len(order_to_info.keys())):
    print(order_to_info[i])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Computing receptive field parameters of tf.keras.applications models.
Step2: Bonus stuff
|
7,147
|
<ASSISTANT_TASK:>
Python Code:
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
%pylab inline
import matplotlib.pyplot as plt
import pandas as pd
print(pd.__version__)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
# let's see what compute devices we have available, hopefully a GPU
sess = tf.Session()
devices = sess.list_devices()
for d in devices:
    print(d.name)
# a small sanity check: does tf seem to work ok?
hello = tf.constant('Hello TF!')
print(sess.run(hello))
# Download the pre-trained insurance risk model and load it with Keras.
!curl -O https://raw.githubusercontent.com/DJCordhose/ai/master/notebooks/manning/model/insurance.hdf5
model = tf.keras.models.load_model('insurance.hdf5')
# a little sanity check: does it work at all?
# within this code, we expect Olli to be a green customer with a high probability
# 0: red
# 1: green
# 2: yellow
olli_data = [100, 47, 10]
X = np.array([olli_data])
model.predict(X)
# Convert the Keras HDF5 model to TensorFlow.js format with the CLI tool.
!pip install tensorflowjs
!rm -rf tfjs
!mkdir tfjs
!tensorflowjs_converter --input_format keras \
    ./model/insurance.hdf5 \
    ./tfjs
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading and validating our model
Step2: Descison Boundaries for 2 Dimensions
Step3: Converting our Keras Model to TensorFlow.js
Step4: Use the command line tool
|
7,148
|
<ASSISTANT_TASK:>
Python Code:
import nltk
from tethne.readers import zotero
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import gensim
import networkx as nx
import pandas as pd
from collections import defaultdict, Counter

# Shared NLP resources: WordNet lemmatizer, Snowball stemmer, stopwords.
wordnet = nltk.WordNetLemmatizer()
stemmer = nltk.SnowballStemmer('english')
stoplist = stopwords.words('english')

# Plaintext corpus plus Zotero metadata indexed by link.
text_root = '../data/EmbryoProjectTexts/files'
zotero_export_path = '../data/EmbryoProjectTexts'
corpus = nltk.corpus.PlaintextCorpusReader(text_root, 'https.+')
metadata = zotero.read(zotero_export_path, index_by='link', follow_links=False)
def normalize_token(token):
    """Convert token to lowercase, and stem using the Porter algorithm.

    Parameters
    ----------
    token : str

    Returns
    -------
    token : str
    """
    # NOTE(review): despite the docstring, this lemmatizes with WordNet
    # rather than Porter-stemming -- confirm which is intended.
    return wordnet.lemmatize(token.lower())
def filter_token(token):
    """Evaluate whether or not to retain ``token``.

    Parameters
    ----------
    token : str

    Returns
    -------
    keep : bool
    """
    # Keep only alphabetic tokens longer than 2 chars that are not stopwords.
    token = token.lower()
    return token not in stoplist and token.isalpha() and len(token) > 2
# Tokenize, normalize and filter every document; keep publication years.
documents = [[normalize_token(token)
              for token in corpus.words(fileids=[fileid])
              if filter_token(token)]
             for fileid in corpus.fileids()]
years = [metadata[fileid].date for fileid in corpus.fileids()]

# Token frequencies over the whole corpus and document frequencies per type.
wordcounts = nltk.FreqDist([token for document in documents for token in document])
wordcounts.plot(20)
documentcounts = nltk.FreqDist([token for document in documents for token in set(document)])
documentcounts.plot(80)

# Drop extremely frequent words and near-hapaxes before topic modeling.
filtered_documents = [[token for token in document
                       if wordcounts[token] < 2000
                       and 1 < documentcounts[token] < 350]
                      for document in documents]

# Bag-of-words corpus and a 20-topic LDA model trained with 20 full passes.
dictionary = gensim.corpora.Dictionary(filtered_documents)
documents_bow = [dictionary.doc2bow(document) for document in filtered_documents]
model = gensim.models.LdaModel(documents_bow,
                               id2word=dictionary,
                               num_topics=20,
                               update_every=0,
                               passes=20)
# Print the top words per topic, then tabulate topic prevalence by year.
# FIX: converted Python 2 remnants (print statements, dict.iteritems())
# to Python 3 for consistency with the rest of the file.
for i, topic in enumerate(model.print_topics(num_topics=20, num_words=5)):
    print(i, ':', topic)
documents_lda = model[documents_bow]
documents_lda[6]

# Count how many documents express each topic in each year.
topic_counts = defaultdict(Counter)
for year, document in zip(years, documents_lda):
    for topic, representation in document:
        topic_counts[topic][year] += 1.
topics_over_time = pd.DataFrame(columns=['Topic', 'Year', 'Count'])
i = 0
for topic, yearcounts in topic_counts.items():
    for year, count in yearcounts.items():
        topics_over_time.loc[i] = [topic, year, count]
        i += 1
topics_over_time

# Bar chart of topic 0's document counts per year.
topic_0_over_time = topics_over_time[topics_over_time.Topic == 0]
plt.bar(topic_0_over_time.Year, topic_0_over_time.Count)
plt.ylabel('Number of documents')
plt.show()
# scipy's cosine distance (1 - cosine similarity), for comparing topic vectors.
from scipy.spatial import distance
distance.cosine
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: 1.7. Finding concepts in texts - Latent Dirichlet Allocation
Step3: We will represent our documents as a list of lists. Each sub-list contains tokens in the document.
Step4: Further filtering
Step5: Here we filter the tokens in each document, preserving the shape of the corpus.
Step6: It's easier to compute over integers, so we use a Dictionary to create a mapping between words and their integer/id representation.
Step7: The doc2bow() converts a document (series of tokens) into a bag-of-words representation.
Step8: We're ready to fit the model! We pass our BOW-transformed documents, our dictionary, and the number of topics. update_every=0 disables an "online" feature in the sampler (used for very very large corpora), and passes=20 tells the sampler to pass over the whole corpus 20 times.
|
7,149
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import numpy as np
import os
from collections import Counter
from nltk.tokenize import TweetTokenizer
import codecs
from random import randint
tf.__version__
# Read the sentiment CSV (label,text per line); keep only the first 1000 rows.
with codecs.open(os.path.join('../data', 'sent.csv'), 'r', encoding='utf-8') as f:
    corpus_line_by_line = f.read().lower().split("\n")
corpus_line_by_line = corpus_line_by_line[:1000]
corpus_line_by_line = [line.rstrip('\r').split(',') for line in corpus_line_by_line]
corpus_line_by_line[0]

# Tokenize each line's text with the NLTK tweet tokenizer.
tw = TweetTokenizer()
corpus_tokenized = list(map(lambda line: (line[0], tw.tokenize(line[1])), corpus_line_by_line))
corpus_tokenized[0]

# Flatten all tokens into one word list for vocabulary building.
corpus = []
for sent in corpus_tokenized:
    corpus.extend([word for word in sent[1]])
def build_vocab(words, vocab_size):
    """Build a vocabulary of the ``vocab_size`` most frequent words.

    Index 0 is reserved for the out-of-vocabulary token '<UNK>'.

    Returns
    -------
    (dict, dict)
        word -> index and index -> word mappings.
    """
    count = [('<UNK>', -1)]
    count.extend(Counter(words).most_common(vocab_size - 1))
    # enumerate() replaces the original manual index counter.
    dictionary = {word: index for index, (word, _) in enumerate(count)}
    index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return dictionary, index_dictionary
# Build the full-vocabulary lookup tables and spot-check the entries.
vocabulary_size = len(set(corpus))
vocabulary_size
vocabulary, reverse_vocabulary = build_vocab(corpus, vocabulary_size)
for word, index in zip(reverse_vocabulary.values(), vocabulary.values()):
    print(word, index)
def index_words_in_corpus(corpus, vocab=None):
    """Map tokens to vocabulary indices; unknown tokens map to 0 (<UNK>).

    ``vocab`` defaults to the module-level ``vocabulary`` built above, so
    existing single-argument callers are unaffected.
    """
    if vocab is None:
        vocab = vocabulary
    return [vocab[token] if token in vocab else 0 for token in corpus]
# Index every tokenized review, keeping its sentiment label.
corpus_indexed = [(line[0], index_words_in_corpus(line[1])) for line in corpus_tokenized]
len(corpus_indexed)
corpus_indexed[0]
vocabulary_size
def one_hot_encode(review, size=None):
    """Multi-hot encode a list of word indices into a vector of ``size``.

    ``size`` defaults to the module-level ``vocabulary_size`` so existing
    single-argument callers keep working.
    """
    if size is None:
        size = vocabulary_size
    temp = np.zeros(size)
    for indx in review:
        temp[indx] = 1
    return temp
def one_hot_encode_class(sentiment):
    """One-hot encode a sentiment label: 1 -> [0, 1], anything else -> [1, 0]."""
    encoded = np.zeros(2)
    encoded[1 if sentiment == 1 else 0] = 1
    return encoded
# data = np.array([(one_hot_encode(word), vocabulary.get(word)) for word in corpus])
# Multi-hot encode every indexed review into a |V|-dimensional vector.
data = np.array([one_hot_encode(word[1]) for word in corpus_indexed])
print("TRAIN: (Total number of words, Vocabulary size):", data.shape)
sample = data[0]
np.where(sample == 1)[0]

def decode_sentence(sample):
    # Map a multi-hot vector back to its (unordered) words via the
    # module-level reverse_vocabulary.
    sentence = []
    for i in range(sample.shape[0]):
        if sample[i] == 1:
            sentence.append(reverse_vocabulary[i])
    return ' '.join([i for i in sentence])

print(sample, decode_sentence(sample))
def get_batches(batch_size, source=None):
    """Return a random contiguous batch of ``batch_size`` rows.

    BUG FIX: ``random.randint`` requires two arguments (a, b); the original
    one-argument call raised TypeError. The upper bound now also leaves room
    for a full batch. ``source`` defaults to the module-level ``data``.
    """
    if source is None:
        source = data
    idx = randint(0, source.shape[0] - batch_size)
    return source[idx:idx + batch_size]
def xavier_init(n_inputs, n_outputs, uniform=True):
    """Set the parameter initialization using the method described.

    This method is designed to keep the scale of the gradients roughly the
    same in all layers.

    Xavier Glorot and Yoshua Bengio (2010):
        Understanding the difficulty of training deep feedforward neural
        networks. International conference on artificial intelligence and
        statistics.

    Args:
        n_inputs: The number of input nodes into each output.
        n_outputs: The number of output nodes for each input.
        uniform: If true use a uniform distribution, otherwise use a normal.

    Returns:
        An initializer.
    """
    if uniform:
        # 6 was used in the paper.
        init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
        return tf.random_uniform_initializer(-init_range, init_range)
    else:
        # 3 gives us approximately the same limits as above since this repicks
        # values greater than 2 standard deviations from the mean.
        stddev = tf.sqrt(3.0 / (n_inputs + n_outputs))
        return tf.truncated_normal_initializer(stddev=stddev)

'''A recent paper by He, Rang, Zhen and Sun they build on Glorot & Bengio and suggest using 2/size_of_input_neuron
'''
# NOTE(review): this second definition shadows the Glorot/Bengio version
# above, so only this He-style initializer is actually used below.
def xavier_init(size):
    in_dim = size[0]
    # xavier_stddev = 1. / in_dim
    # xavier_stddev = 2. / in_dim
    xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
    return tf.random_normal(shape=size, stddev=xavier_stddev)
with tf.name_scope('Input'):
    # One multi-hot bag-of-words vector per example.
    X = tf.placeholder(tf.float32, shape=[None, vocabulary_size])

with tf.name_scope('Discriminator_weights_biases'):
    D_W1 = tf.Variable(xavier_init([vocabulary_size, 128]))
    D_b1 = tf.Variable(tf.zeros(shape=[128]))
    D_W2 = tf.Variable(xavier_init([128, 1]))
    D_b2 = tf.Variable(tf.zeros(shape=[1]))
theta_D = [D_W1, D_W2, D_b1, D_b2]

def discriminator(x):
    # Single ReLU hidden layer followed by a sigmoid real/fake score.
    D_h1 = tf.nn.relu(tf.matmul(x, D_W1) + D_b1)
    D_logit = tf.matmul(D_h1, D_W2) + D_b2
    D_prob = tf.nn.sigmoid(D_logit)
    return D_prob, D_logit

with tf.name_scope('Latent_space'):
    # 100-dimensional latent noise input for the generator.
    Z = tf.placeholder(tf.float32, shape=[None, 100])

with tf.name_scope('Generator_weights_biases'):
    G_W1 = tf.Variable(xavier_init([100, 128]))
    G_b1 = tf.Variable(tf.zeros(shape=[128]))
    G_W2 = tf.Variable(xavier_init([128, vocabulary_size]))
    G_b2 = tf.Variable(tf.zeros(shape=[vocabulary_size]))
theta_G = [G_W1, G_W2, G_b1, G_b2]
def sample_Z(m, n):
    """Draw an (m, n) latent-noise matrix uniformly from [0, 1)."""
    return np.random.uniform(low=0., high=1., size=[m, n])
def generator(z):
    # Map latent noise to a sigmoid "bag-of-words" probability vector.
    G_h1 = tf.nn.relu(tf.matmul(z, G_W1) + G_b1)
    G_log_prob = tf.matmul(G_h1, G_W2) + G_b2
    G_prob = tf.nn.sigmoid(G_log_prob)
    return G_prob

# Wire up the GAN graph: fake samples plus real/fake discriminator outputs.
G_sample = generator(Z)
D_real, D_logit_real = discriminator(X)
D_fake, D_logit_fake = discriminator(G_sample)
with tf.name_scope('cost'):
    # Standard GAN cross-entropy losses on the discriminator logits.
    D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
    D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
    D_loss = D_loss_real + D_loss_fake
    G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))

with tf.name_scope('train'):
    # Each optimizer updates only its own network's variables.
    D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)
    G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)

minibatch_size = 128
Z_dim = 100
saver = tf.train.Saver(max_to_keep=1)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
i = 0
for it in range(100000):
    if it % 1000 == 0:
        # Periodically sample the generator and decode a sentence;
        # activations above 0.95 are treated as "word present".
        samples = sess.run(G_sample, feed_dict={Z: sample_Z(1, Z_dim)})
        for sample in samples:
            sample[sample > 0.95] = 1
            print(samples.shape, decode_sentence(sample))
    # Alternate one discriminator step and one generator step.
    X_mb = get_batches(minibatch_size)
    _, D_loss_curr = sess.run([D_solver, D_loss], feed_dict={X: X_mb, Z: sample_Z(minibatch_size, Z_dim)})
    _, G_loss_curr = sess.run([G_solver, G_loss], feed_dict={Z: sample_Z(minibatch_size, Z_dim)})
    if it % 1000 == 0:
        print('Iter: {}'.format(it))
        print('D loss: {:.4}'. format(D_loss_curr))
        print('G_loss: {:.4}'.format(G_loss_curr))
        print()

# Persist the trained graph, then reload and sample from it once.
saver.save(sess, './tf_model/generator', global_step=1000000)
saver.restore(sess, './tf_model/generator-1000000')
samples = sess.run(G_sample, feed_dict={Z: sample_Z(1, Z_dim)})
for sample in samples:
    print(sample)
    # Keep only fully saturated activations, then decode back to words.
    sample[sample == 1.] = 1
    sample[sample < 1.] = 0
    print(np.where(sample == 0)[0])
    print(decode_sentence(sample))
# Load pre-trained word vectors (skip the header line), then parse each
# "word v1 v2 ..." line into a dict of numpy arrays.
with codecs.open("../data/sentiment.vec", 'r', encoding='utf-8') as f:
    next(f)
    word_vectors_twitter = f.read().split('\n')
word_vectors_twitter[0]
word_vectors = {}
for lin in word_vectors_twitter:
    line = lin.split()
    # print(line)
    try:
        word_vectors[line[0]] = np.array(line[1:])
    except:
        # Report (but skip) malformed lines.
        print(line, lin)
len(list(word_vectors.keys()))
word_vectors['to']

# Quick cosine-similarity sanity check.
from sklearn.metrics.pairwise import cosine_similarity
cosine_similarity([[1, 0, -1]], [[-1,-1, 0]])
cosine_similarity([[1, 0, -1]], [[-1,-1, 0]])
def nn(word):
    """Print the nearest neighbour of ``word`` by cosine similarity over
    the module-level ``word_vectors`` table."""
    # Renamed from `max`, which shadowed the builtin; dropped unused
    # `nearest` variable.
    best_score, best_word = 0, 'max'
    for candidate in word_vectors.keys():
        try:
            # Compute the similarity once instead of twice per candidate.
            score = cosine_similarity([word_vectors[word]],
                                      [word_vectors[candidate]])[0][0]
        except Exception:
            # Skip entries the similarity can't be computed for; a bare
            # `except:` would also have swallowed KeyboardInterrupt.
            continue
        if score > best_score and candidate != word:
            best_score, best_word = score, candidate
    print(best_word)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Vanilla Generative Adversarial <img src="http
Step3: State of art weight Initialization strategy
Step4: Discriminator
Step5: Generator
Step6: Check if the generator is saved properly
Step7: Reasons why sentences seem nonsensical
Step8: Missed out on 5 words
Step9: Function to find nearest neighbours
|
7,150
|
<ASSISTANT_TASK:>
Python Code:
# Truth table for OR, AND and NOT over all boolean combinations.
print("A", "B", "A|B", "A&B", "not A")
for A in [False, True]:
    for B in [False, True]:
        print(A, B, A or B, A and B, not A)
# Digit alphabet for bases up to 36.
DIGITS = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'

def to_base(number, rbase):
    """Convert a positive integer to its string representation in base
    ``rbase`` (2..36).

    BUG FIX: the original built digits with str(remainder), which renders
    remainders >= 10 as two decimal digits (987 in base 16 printed as
    '31311' instead of '3DB').
    """
    result = ""
    while number > 0:
        number, remainder = divmod(number, rbase)
        result = DIGITS[remainder] + result
    return result

number = 987
rbase = 16
print(to_base(number, rbase))
## Ask the user for a mass and a velocity, and print the momentum.
# BUG FIX: input() returns strings, so the original `mass * velocity`
# raised a TypeError (str * str). Convert to float before multiplying.
mass = float(input("Geef de massa (in kg): "))
velocity = float(input("Geef de snelheid (in m/s): "))
momentum = mass * velocity
print("Momentum: {} N/s".format(momentum))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Opdracht 2
Step2: Opdracht 3
Step3: Opdracht 4
|
7,151
|
<ASSISTANT_TASK:>
Python Code:
# Load the PYNQ base overlay bitstream onto the FPGA.
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
%%microblaze base.PMODA
#include "xparameters.h"
#include "xtmrctr.h"
#include "gpio.h"
#include "timer.h"
#include <pmod_grove.h>

/* AXI timer register offsets. */
#define TCSR0 0x00
#define TLR0 0x04
#define TCR0 0x08
#define TCSR1 0x10
#define TLR1 0x14
#define TCR1 0x18
#define MAX_COUNT 0xFFFFFFFF

/* Drive a 10 us trigger pulse on the ultrasonic ranger pin. */
void create_10us_pulse(gpio usranger){
    gpio_set_direction(usranger, GPIO_OUT);
    gpio_write(usranger, 0);
    delay_us(2);
    gpio_write(usranger, 1);
    delay_us(10);
    gpio_write(usranger, 0);
}

/* Switch the pin to input so the echo pulse can be read back. */
void configure_as_input(gpio usranger){
    gpio_set_direction(usranger, GPIO_IN);
}

/* Measure the echo pulse width in timer ticks (handles counter wrap). */
unsigned int capture_duration(gpio usranger){
    unsigned int count1, count2;
    count1=0;
    count2=0;
    /* Reset and start timer 0, then time the high phase of the echo. */
    XTmrCtr_WriteReg(XPAR_TMRCTR_0_BASEADDR, 0, TLR0, 0x0);
    XTmrCtr_WriteReg(XPAR_TMRCTR_0_BASEADDR, 0, TCSR0, 0x190);
    while(!gpio_read(usranger));
    count1=XTmrCtr_ReadReg(XPAR_TMRCTR_0_BASEADDR, 0, TCR0);
    while(gpio_read(usranger));
    count2=XTmrCtr_ReadReg(XPAR_TMRCTR_0_BASEADDR, 0, TCR0);
    if(count2 > count1) {
        return (count2 - count1);
    } else {
        /* The free-running counter wrapped between the two reads. */
        return((MAX_COUNT - count1) + count2);
    }
}

/* Trigger one measurement on PMOD G1 and return the raw tick count. */
unsigned int read_raw(){
    gpio usranger;
    usranger = gpio_open(PMOD_G1_A);
    create_10us_pulse(usranger);
    configure_as_input(usranger);
    return capture_duration(usranger);
}
from pynq import Clocks

def read_distance_cm():
    # Convert raw timer ticks to microseconds via the fabric clock period.
    raw_value = read_raw()
    clk_period_ns = int(1000 / Clocks.fclk0_mhz)
    num_microseconds = raw_value * clk_period_ns * 0.001
    # Echoes longer than 30 ms are treated as out of range (500 cm).
    # NOTE(review): 58 us/cm is the usual HC-SR04 convention -- confirm
    # against the attached ranger's datasheet.
    if num_microseconds * 0.001 > 30:
        return 500
    else:
        return num_microseconds/58

read_distance_cm()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Use Microblaze to control the ultrasonic ranger
Step2: 2. Do one-time distance measurement
|
7,152
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('../')
import numpy as np
from anemoi import MiniZephyr25D, SimpleSource, AnalyticalHelmholtz
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
%matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png')
matplotlib.rcParams['savefig.dpi'] = 150 # Change this to adjust figure size

# Modelling configuration for the 2.5D Helmholtz problem.
systemConfig = {
    'dx': 1., # m
    'dz': 1., # m
    'c': 2500., # m/s
    'rho': 1., # kg/m^3
    'nx': 100, # count
    'nz': 200, # count
    'freq': 2e2, # Hz
    'nky': 160,
    '3D': True,
}
nx = systemConfig['nx']
nz = systemConfig['nz']
dx = systemConfig['dx']
dz = systemConfig['dz']

# Numerical (MiniZephyr) and analytical Helmholtz solvers plus a point source.
MZ = MiniZephyr25D(systemConfig)
AH = AnalyticalHelmholtz(systemConfig)
SS = SimpleSource(systemConfig)

# Place the source at (25, 25) and compute both wavefields.
xs, zs = 25, 25
sloc = np.array([xs, zs]).reshape((1,2))
q = SS(sloc)
uMZ = MZ*q
uAH = AH(sloc)
# Side-by-side phase and clipped real-part plots: analytical vs. numerical.
clip = 0.01
plotopts = {
    'vmin': -np.pi,
    'vmax': np.pi,
    'extent': [0., dx * nx, dz * nz, 0.],
    'cmap': cm.bwr,
}
fig = plt.figure()
ax1 = fig.add_subplot(1,4,1)
plt.imshow(np.angle(uAH.reshape((nz, nx))), **plotopts)
plt.title('AH Phase')
ax2 = fig.add_subplot(1,4,2)
plt.imshow(np.angle(uMZ.reshape((nz, nx))), **plotopts)
plt.title('MZ Phase')
plotopts.update({
    'vmin': -clip,
    'vmax': clip,
})
ax3 = fig.add_subplot(1,4,3)
plt.imshow(uAH.reshape((nz, nx)).real, **plotopts)
plt.title('AH Real')
ax4 = fig.add_subplot(1,4,4)
plt.imshow(uMZ.reshape((nz, nx)).real, **plotopts)
plt.title('MZ Real')
fig.tight_layout()

# 1D profile through the source column xs.
fig = plt.figure()
ax = fig.add_subplot(1,1,1, aspect=1000)
plt.plot(uAH.real.reshape((nz, nx))[:,xs], label='AnalyticalHelmholtz')
plt.plot(uMZ.real.reshape((nz, nx))[:,xs], label='MiniZephyr')
plt.legend(loc=1)
plt.title('Real part of response through xs=%d'%xs)

# Percent-error maps at two colour scales (1e-15 guards divide-by-zero).
uMZr = uMZ.reshape((nz, nx))
uAHr = uAH.reshape((nz, nx))
plotopts.update({
    'cmap': cm.jet,
    'vmin': 0.,
    'vmax': 50.,
})
fig = plt.figure()
ax1 = fig.add_subplot(1,2,1)
plt.imshow(abs(uAHr - uMZr)/(abs(uAHr)+1e-15) * 100, **plotopts)
cb = plt.colorbar()
cb.set_label('Percent error')
plotopts.update({'vmax': 10.})
ax2 = fig.add_subplot(1,2,2)
plt.imshow(abs(uAHr - uMZr)/(abs(uAHr)+1e-15) * 100, **plotopts)
cb = plt.colorbar()
cb.set_label('Percent error')
fig.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Error plots for MiniZephyr vs. the AnalyticalHelmholtz response
Step2: Relative error of the MiniZephyr solution (in %)
|
7,153
|
<ASSISTANT_TASK:>
Python Code:
# range() demos: default start, explicit start, and a custom step.
for i in range(10):
    print(i)
for i in range(300, 306):
    print(i)
for i in range(15, 26, 3):
    print(i)
numbers = list(range(10))
print(numbers[3:])

# Iterating by index vs. iterating values directly.
words = "Be yourself; everyone else is already taken".split()
for i in range(len(words)):
    print(words[i])
for word in words:
    print(word)

# Tracking the index with a manual counter...
counter = 0
for word in words:
    print(word, ": index", counter)
    counter+=1
# ... or with range(len(...)).
for i in range(len(words)):
    print(words[i], ": index", i)
# enumerate() yields (index, item) pairs, which can be unpacked.
print(list(enumerate(words)))
for mini_tuple in enumerate(words):
    print(mini_tuple)
item = (5, 'already')
index, word = item # this is the same as: index, word = (5, "already")
print(index)
print(word)
for item in enumerate(words):
    index, word = item
    print(index)
    print(word)
    print("=======")
# Unpacking directly in the for statement is the idiomatic form.
for index, word in enumerate(words):
    print(index)
    print(word)
    print("====")
for i, word in enumerate(words):
    print(word, ": index", i)

# zip() pairs up parallel lists element-wise.
titles = ["Emma", "Stoner", "Inferno", "1984", "Aeneid"]
authors = ["J. Austen", "J. Williams", "D. Alighieri", "G. Orwell", "P. Vergilius"]
dates = ["1815", "2006", "Ca. 1321", "1949", "before 19 BC"]
list(zip(titles, authors))
list(zip(titles, dates))
list(zip(authors, dates))
list(zip(authors, titles, dates))
for author, title in zip(authors, titles):
    print(author)
    print(title)
    print("===")
import string

# A filter-and-append loop and its list-comprehension equivalent.
words = "I have not failed . I’ve just found 10,000 ways that won’t work .".split()
word_lengths = []
for word in words:
    if word not in string.punctuation:
        word_lengths.append(len(word))
print(word_lengths)
word_lengths = [len(word) for word in words if word not in string.punctuation]
print(word_lengths)
print(type(word_lengths))
words_without_punc = [word for word in words if word not in string.punctuation]
print(words_without_punc)
all_word_lengths = [len(word) for word in words]
print(all_word_lengths)
square_numbers = [x*x for x in range(10)]
print(square_numbers)

# A generator expression inside tuple() builds a tuple directly.
tuple_word_lengths = tuple(len(word) for word in words if word not in string.punctuation)
print(tuple_word_lengths)
print(type(tuple_word_lengths))

# NOTE(review): deliberate error demo -- tuples are immutable and have no
# append(), so this loop raises AttributeError.
tuple_word_lengths = tuple()
for word in words:
    if word not in string.punctuation:
        tuple_word_lengths.append(len(word))
print(tuple_word_lengths)

# Nested lists/tuples via comprehensions.
nested_list = [[x,x+2] for x in range(10, 22, 3)]
print(nested_list)
print(type(nested_list))
print(type(nested_list[3]))
nested_tuple = [(x,x+2) for x in range(10, 22, 3)]
print(nested_tuple)
print(type(nested_tuple))
print(type(nested_tuple[3]))
nested_tuple = tuple((x,x+2) for x in range(10, 22, 3))
print(nested_tuple)
print(type(nested_tuple))
print(type(nested_tuple[3]))

# Element-wise differences with zip; `a & b` (bitwise AND) keeps only
# pairs where both elements are non-zero.
a = [2, 3, 5, 7, 0, 2, 8]
b = [3, 2, 1, 7, 0, 0, 9]
diffs = [a-b for a,b in zip(a, b)]
print(diffs)
diffs = [abs(a-b) for a,b in zip(a, b) if (a & b)]
print(diffs)

# Sum of selected elements across two generated sequences.
A = tuple([x-1,x+3] for x in range(10, 100, 3))
B = [(n*n, n+50) for n in range(10, 1000, 3) if n <= 100]
sums = sum(tuple(item_a[1]+item_b[0] for item_a, item_b in zip(A[:10], B[:10])))
print(sums)

# Set and dict comprehensions.
text = "This text contains a lot of different characters, but probably not all of them."
chars = {char.lower() for char in text if char not in string.punctuation}
print(chars)
counts = {word:len(word) for word in words}
print(counts)
from IPython.core.display import HTML
def css_styling():
    """Read the notebook's custom stylesheet and return it as displayable HTML."""
    # Use a context manager so the file handle is closed promptly
    # (the original open(...).read() never closed the file).
    with open("styles/custom.css", "r") as f:
        styles = f.read()
    return HTML(styles)  # HTML comes from the IPython.core.display import above

css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here, range() will return a number of integers, starting from zero, up to (but not including) the number which we pass as an argument to the function. Using range() is of course much more convenient to generate such lists of numbers than writing e.g. a while-loop to achieve the same result. Note that we can pass more than one argument to range(), if we want to start counting from a number higher than zero (which will be the default when you only pass a single parameter to the function)
Step2: We can even specify a 'step size' as a third argument, which controls how much a variable will increase with each step
Step3: If you don't specify the step size explicitly, it will default to 1. If you want to store or print the result of calling range(), you have to cast it explicitly, for instance, to a list
Step4: Enumerate
Step5: Naturally, the same result can just as easily be obtained using a for-loop
Step6: One drawback of such an easy-to-write loop, however, is that it doesn't keep track of the index of the word that we are printing in one of the iterations. Suppose that we would like to print the index of each word in our example above, we would then have to work with a counter...
Step7: ... or indeed use a call to range() and len()
Step8: A function that makes life in Python much easier in this respect is enumerate(). If we pass a list to enumerate(), it will return a list of mini-tuples
Step9: Here -- as with range() -- we have to cast the result of enumerate() to e.g. a list before we can actually print it. Iterating over the result of enumerate(), on the other hand, is not a problem. Here, we print out each mini-tuple, consisting of an index and an item, in a for-loop
Step10: When using such for-loops and enumerate(), we can do something really cool. Remember that we can 'unpack' tuples
Step11: In our for-loop example, we can apply the same kind of unpacking in each iteration
Step12: However, there is also a super-convenient shortcut for this in Python, where we unpack each item in the for-statement already
Step13: How cool is that? Note how easy it becomes now, to solve our problem with the index above
Step14: Zip
Step15: In each of these lists, the third item always corresponds to Dante's masterpiece and the last item to the Aeneid by Vergil, which inspired him. The use of zip() can now easily be illustrated
Step16: Do you see what happened here? In fact, zip() really functions like a 'zipper' in the real-world
Step17: How awesome is that? Here too
Step18: As you can understand, this is really useful functionality for dealing with long, complex lists and especially combinations of them.
Step19: We can create the exact same list of numbers using a list comprehension which only takes up one line of Python code
Step20: OK, impressive, but there are a lot of new things going on here. Let's go through this step by step. The first step is easy
Step21: Inside the squared brackets, we can find the actual comprehension which will determine what goes inside our new list. Note that it is not always possible to read these comprehensions from left to right, so you will have to get used to the way they are built up from a syntactic point of view. First of all, we add an expression that determines which elements will make it into our list, in this case
Step22: Moreover, we don't have to include the if-statement at the end (it is always optional)
Step23: In the comprehensions above, words is the only pre-existing input to our comprehension; all the other variables are created and manipulated inside the function. The new range() function which we saw at the beginning of this chapter is also often used as the input for a comprehension
Step24: Importantly, we can just as easily create a tuple using the same comprehension syntax, but this time calling tuple() on the comprehension, instead of using the squared brackets to create a normal list
Step25: This is very useful, especially if you can figure out why the following code block will generate an error...
Step26: Good programmers can do amazing things with comprehensions. With list comprehensions, it becomes really easy, for example, to create nested lists (lists that themselves consist of lists or tuples). Can you figure out what is happening in the following code block
Step27: In the first line above, we create a new list (nested_list) but we don't fill it with single numbers, but instead with mini-lists that contain two values. We could just as easily have done this with mini-tuples, by using round brackets. Can you spot the differences below?
Step28: Note that zip() can also be very useful in this respect, because you can unpack items inside the comprehension. Do you understand what is going on in the following code block
Step29: Again, more complex comprehensions are thinkable
Step30: Great
Step31: Finally, we should also mention that dictionaries and sets can also be filled in a one-liner using such comprehensions. For sets, the syntax runs entirely parallel to that of list and tuple comprehensions, but here, we use curly brackets to surround the expression
Step32: For dictionaries, which consist of key-value pairs, the syntax is only slightly more complicated. Here, you have to make sure that you link the correct key to the correct value using a colon, in the very first part of the comprehension. The following example will make this clearer
Step33: You've reached the end of Chapter 7! Ignore the code below, it's only here to make the page pretty
|
7,154
|
<ASSISTANT_TASK:>
Python Code:
%run "../Functions/1. Google form analysis.ipynb"
%run "../Functions/4. User comparison.ipynb"
#getAllResponders()
setAnswerTemporalities(gform)
# small sample
#allData = getAllUserVectorData( getAllUsers( rmdf1522 )[:10] )
# complete set
#allData = getAllUserVectorData( getAllUsers( rmdf1522 ) )
# subjects who answered the gform
allData = getAllUserVectorData( getAllResponders() )
# 10 subjects who answered the gform
#allData = getAllUserVectorData( getAllResponders()[:10] )
efficiencies = allData.loc['efficiency'].sort_values()
efficiencies.index = range(0, len(allData.columns))
efficiencies.plot(title = 'efficiency')
efficiencies2 = allData.loc['efficiency'].sort_values()
efficiencies2 = efficiencies2[efficiencies2 != 0]
efficiencies2.index = range(0, len(efficiencies2))
efficiencies2 = np.log(efficiencies2)
efficiencies2.plot(title = 'efficiency log')
maxChapter = allData.loc['maxChapter'].sort_values()
maxChapter.index = range(0, len(allData.columns))
maxChapter.plot(title = 'maxChapter')
len(allData.columns)
userIds = getAllResponders()
_source = correctAnswers
# _source is used as correction source, if we want to include answers to these questions
#def getAllUserVectorData( userIds, _source = [] ):
# result
isInitialized = False
allData = []
f = FloatProgress(min=0, max=len(userIds))
display(f)
for userId in userIds:
#print(str(userId))
f.value += 1
if not isInitialized:
isInitialized = True
allData = getUserDataVector(userId, _source = _source)
else:
allData = pd.concat([allData, getUserDataVector(userId, _source = _source)], axis=1)
#print('done')
allData
userId
# NOTE(review): source indentation was lost; if/else structure reconstructed.
# Inlined body of plotAllUserVectorDataCorrelationMatrix (the def is commented
# out below so each step can be run as a notebook cell).
methods = ['pearson', 'kendall', 'spearman']
_allUserVectorData = allData.T
_method = methods[0]
_title='RedMetrics Correlations'
_abs=True
_clustered=False
_figsize = (20,20)
#def plotAllUserVectorDataCorrelationMatrix(
#    _allUserVectorData,
#    _method = methods[0],
#    _title='RedMetrics Correlations',
#    _abs=False,
#    _clustered=False,
#    _figsize = (20,20)
#):
_progress = FloatProgress(min=0, max=3)
display(_progress)
# computation of correlation matrix
_m = _method
if(not (_method in methods)):
    # Fall back to 'pearson' for an unrecognized method name.
    _m = methods[0]
_correlation = _allUserVectorData.astype(float).corr(_m)
_progress.value += 1
if(_abs):
    # Optionally take absolute values so strong negative correlations also stand out.
    _correlation = _correlation.abs()
_progress.value += 1
# plot: clustered heatmap or a plain one, depending on the flag
if(_clustered):
    sns.clustermap(_correlation,cmap=plt.cm.jet,square=True,figsize=_figsize)
else:
    _fig = plt.figure(figsize=_figsize)
    _ax = plt.subplot(111)
    _ax.set_title(_title)
    sns.heatmap(_correlation,ax=_ax,cmap=plt.cm.jet,square=True)
_progress.value += 1
gform[QTemporality].unique()
allData.loc['scoreundefined'].dropna()
getAllUsers(rmdf1522)[:10]
len(getAllUsers(rmdf1522))
# Build a userId <-> sessionId table from the 'start' events.
userSessionsRelevantColumns = ['customData.localplayerguid', 'sessionId']
userSessions = rmdf1522[rmdf1522['type']=='start'].loc[:,userSessionsRelevantColumns]
userSessions = userSessions.rename(index=str, columns={'customData.localplayerguid': 'userId'})
userSessions.head()
#groupedUserSessions = userSessions.groupby('customData.localplayerguid')
#groupedUserSessions.head()
#groupedUserSessions.describe().head()
# Checkpoint 'reach' events, restricted to tutorial sections.
checkpointsRelevantColumns = ['sessionId', 'customData.localplayerguid', 'type', 'section', 'userTime']
checkpoints = rmdf1522.loc[:, checkpointsRelevantColumns]
checkpoints = checkpoints[checkpoints['type']=='reach'].loc[:,['section','sessionId','userTime']]
checkpoints = checkpoints[checkpoints['section'].str.startswith('tutorial', na=False)]
#checkpoints = checkpoints.groupby("sessionId")
#checkpoints = checkpoints.max()
checkpoints.head()
# Join sessions with their checkpoints on sessionId (outer join keeps
# sessions that reached no tutorial checkpoint; those rows are dropped below).
#assembled = userSessions.combine_first(checkpoints)
assembled = pd.merge(userSessions, checkpoints, on='sessionId', how='outer')
assembled.head()
userSections = assembled.drop('sessionId', 1)
userSections.head()
userSections = userSections.dropna()
userSections.head()
# Per-user column-wise max: furthest section (string max — assumes section
# names sort in progression order, TODO confirm) and latest userTime.
checkpoints = userSections.groupby("userId")
checkpoints = checkpoints.max()
checkpoints.head()
#userTimedSections = userSections.groupby("userId").agg({ "userTime": np.min })
#userTimedSections = userSections.groupby("userId")
# Per-user play duration = last userTime - first userTime, sorted longest first.
userTimes = userSections.groupby("userId").agg({ "userTime": [np.min, np.max] })
userTimes["duration"] = pd.to_datetime(userTimes["userTime"]["amax"]) - pd.to_datetime(userTimes["userTime"]["amin"])
userTimes["duration"] = userTimes["duration"].map(lambda x: np.timedelta64(x, 's'))
userTimes = userTimes.sort_values(by=['duration'], ascending=[False])
userTimes.head()
# NOTE(review): source indentation was lost; if/elif structure reconstructed.
# Parameters of the (commented-out) getAllUserVectorDataCustom helper,
# inlined here so each step can run as a notebook cell.
sessionCount = 1
_rmDF = rmdf1522
sample = gform
before = False
after = True
gfMode = False
rmMode = True
#def getAllUserVectorDataCustom(before, after, gfMode = False, rmMode = True, sessionCount = 1, _rmDF = rmdf1522)
# Select the user cohort according to the before/after and gf/rm flags.
userIds = []
if (before and after):
    userIds = getSurveysOfUsersWhoAnsweredBoth(sample, gfMode = gfMode, rmMode = rmMode)
elif before:
    if rmMode:
        userIds = getRMBefores(sample)
    else:
        userIds = getGFBefores(sample)
elif after:
    if rmMode:
        userIds = getRMAfters(sample)
    else:
        userIds = getGFormAfters(sample)
if(len(userIds) > 0):
    # Vectorize the selected users and keep only those with the requested
    # number of sessions.
    userIds = userIds[localplayerguidkey]
    allUserVectorData = getAllUserVectorData(userIds, _rmDF = _rmDF)
    allUserVectorData = allUserVectorData.T
    result = allUserVectorData[allUserVectorData['sessionsCount'] == sessionCount].T
else:
    print("no matching user")
    result = []
result
# Same computation through the actual helper function.
getAllUserVectorDataCustom(False, True)
# Users who answered both before and after, restricted to single-session players.
userIdsBoth = getSurveysOfUsersWhoAnsweredBoth(gform, gfMode = True, rmMode = True)[localplayerguidkey]
allUserVectorData = getAllUserVectorData(userIdsBoth)
allUserVectorData = allUserVectorData.T
allUserVectorData[allUserVectorData['sessionsCount'] == 1]
# Sample user GUID for ad-hoc inspection.
testUser = "3685a015-fa97-4457-ad73-da1c50210fe1"
def getScoreFromBinarized(binarizedAnswers):
    """Collapse a binarized answers frame into a per-user score Series.

    Index labels are assumed to be '<correctionsColumnNameStem><gform row
    index>' strings — TODO confirm against getAllBinarized. The returned
    Series is indexed by the matching user GUIDs; gform,
    correctionsColumnNameStem and localplayerguidkey are module globals.
    """
    # Recover the gform row index embedded in each label.
    gformIndices = binarizedAnswers.index.map(lambda s: int(s.split(correctionsColumnNameStem)[1]))
    # Dot product with a ones vector == row-wise sum of correct answers.
    return pd.Series(np.dot(binarizedAnswers, np.ones(binarizedAnswers.shape[1])), index=gform.loc[gformIndices, localplayerguidkey])
# NOTE(review): source indentation was lost; loop structure reconstructed.
#allResponders = getAllResponders()
#gf_both = getSurveysOfUsersWhoAnsweredBoth(gform, gfMode = True, rmMode = False)
rm_both = getSurveysOfUsersWhoAnsweredBoth(gform, gfMode = False, rmMode = True)
#gfrm_both = getSurveysOfUsersWhoAnsweredBoth(gform, gfMode = True, rmMode = True)
# Binarize the before/after science answers and score each user.
sciBinarizedBefore = getAllBinarized(_form = getRMBefores(rm_both))
sciBinarizedAfter = getAllBinarized(_form = getRMAfters(rm_both))
scoresBefore = getScoreFromBinarized(sciBinarizedBefore)
scoresAfter = getScoreFromBinarized(sciBinarizedAfter)
medianBefore = np.median(scoresBefore)
medianAfter = np.median(scoresAfter)
maxScore = sciBinarizedBefore.shape[1]
# Assemble per-user learning indicators.
indicators = pd.DataFrame()
indicators[answerTemporalities[0]] = scoresBefore
indicators[answerTemporalities[1]] = scoresAfter
indicators['delta'] = scoresAfter - scoresBefore
indicators['maxPotentialDelta'] = maxScore - scoresBefore
# Guard against division by zero below: users already at max score get potential 1.
for index in indicators['maxPotentialDelta'].index:
    if (indicators.loc[index, 'maxPotentialDelta'] == 0):
        indicators.loc[index, 'maxPotentialDelta'] = 1
# NOTE(review): relativeAfter is normalized by the *before* median so both
# temporalities share the same scale — confirm this is intended.
indicators['relativeBefore'] = scoresBefore / medianBefore
indicators['relativeAfter'] = scoresAfter / medianBefore
indicators['relativeDelta'] = indicators['delta'] / medianBefore
indicators['realizedPotential'] = indicators['delta'] / indicators['maxPotentialDelta']
indicators['increaseRatio'] = indicators[answerTemporalities[0]]
# Same zero-guard for the increaseRatio denominator.
for index in indicators['increaseRatio'].index:
    if (indicators.loc[index, 'increaseRatio'] == 0):
        indicators.loc[index, 'increaseRatio'] = 1
indicators['increaseRatio'] = indicators['delta'] / indicators['increaseRatio']
indicators
# Summary tuple of ranges and medians (the stray trailing '\' on the last
# continuation line was removed: it glued this expression onto the next
# assignment, a syntax error).
(min(indicators['relativeBefore']), max(indicators['relativeBefore'])),\
(min(indicators['relativeDelta']), max(indicators['relativeDelta'])),\
medianBefore,\
np.median(indicators['relativeBefore']),\
np.median(indicators['relativeDelta'])
# Default pair of indicators to plot.
indicatorX = 'relativeBefore'
indicatorY = 'relativeDelta'
def scatterPlotIndicators(indicatorX, indicatorY):
    """Scatter two columns of the module-level `indicators` DataFrame against
    each other, draw median cross-hairs, and print each axis' range and median.
    """
    print(indicatorX + ' range: ' + str((min(indicators[indicatorX]), max(indicators[indicatorX]))))
    print(indicatorY + ' range: ' + str((min(indicators[indicatorY]), max(indicators[indicatorY]))))
    print(indicatorX + ' median: ' + str(np.median(indicators[indicatorX])))
    print(indicatorY + ' median: ' + str(np.median(indicators[indicatorY])))
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.scatter(indicators[indicatorX], indicators[indicatorY])
    plt.xlabel(indicatorX)
    plt.ylabel(indicatorY)
    # vertical line at the x median
    plt.plot( [np.median(indicators[indicatorX]), np.median(indicators[indicatorX])],\
              [min(indicators[indicatorY]), max(indicators[indicatorY])],\
              'k-', lw=2)
    # horizontal line at the y median
    plt.plot( [min(indicators[indicatorX]), max(indicators[indicatorX])],\
              [np.median(indicators[indicatorY]), np.median(indicators[indicatorY])],\
              'k-', lw=2)
# List the available indicator columns, then plot selected pairs against
# the before-play baseline (and potential vs. realized potential).
indicators.columns
scatterPlotIndicators('relativeBefore', 'relativeDelta')
scatterPlotIndicators('relativeBefore', 'realizedPotential')
scatterPlotIndicators('relativeBefore', 'increaseRatio')
scatterPlotIndicators('relativeBefore', 'relativeAfter')
scatterPlotIndicators('maxPotentialDelta', 'realizedPotential')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data vectors of users
Step2: getAllUserVectorData
Step3: Correlation Matrix
Step4: List of users and their sessions
Step5: List of sessions with their checkpoints achievements
Step6: Assembly of both
Step7: Time analysis
Step8: TODO
Step9: user progress classification
|
7,155
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cmcc', 'cmcc-cm2-hr4', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
7,156
|
<ASSISTANT_TASK:>
Python Code:
# Toy Features Dictionary
features = {"sq_footage": [ 1000, 2000, 3000, 4000, 5000],
"house_type": ["house", "house", "apt", "apt", "townhouse"]}
feat_cols = [
tf.feature_column.numeric_column('sq_footage'),
tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
'house_type',['house','apt']
))
]
tf.feature_column.input_layer(features,feat_cols)
feat_cols = [
tf.feature_column.numeric_column('sq_footage'),
tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
'house_type',['house','apt'], default_value=0
))
]
tf.feature_column.input_layer(features,feat_cols)
feat_cols = [
tf.feature_column.numeric_column('sq_footage'),
tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
'house_type',['house','apt'], num_oov_buckets=1
))
]
tf.feature_column.input_layer(features,feat_cols)
feat_cols = [
tf.feature_column.numeric_column('sq_footage'),
tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_hash_bucket(
'house_type',5
))
]
tf.feature_column.input_layer(features,feat_cols)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Feature Column Definition
Step2: Inspect Transformed Data
Step3: Excercise 1
Step4: Excercise 2
Step5: Excercise 3
|
7,157
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install tensorflow==2.7.0
!pip install tensorflow-quantum
# Update package resources to account for version changes.
import importlib, pkg_resources
importlib.reload(pkg_resources)
import tensorflow as tf
import tensorflow_quantum as tfq
import cirq
import sympy
import numpy as np
# visualization tools
%matplotlib inline
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
np.random.seed(1234)
def generate_random_qnn(qubits, symbol, depth):
Generate random QNN's with the same structure from McClean et al.
circuit = cirq.Circuit()
for qubit in qubits:
circuit += cirq.ry(np.pi / 4.0)(qubit)
for d in range(depth):
# Add a series of single qubit rotations.
for i, qubit in enumerate(qubits):
random_n = np.random.uniform()
random_rot = np.random.uniform(
) * 2.0 * np.pi if i != 0 or d != 0 else symbol
if random_n > 2. / 3.:
# Add a Z.
circuit += cirq.rz(random_rot)(qubit)
elif random_n > 1. / 3.:
# Add a Y.
circuit += cirq.ry(random_rot)(qubit)
else:
# Add a X.
circuit += cirq.rx(random_rot)(qubit)
# Add CZ ladder.
for src, dest in zip(qubits, qubits[1:]):
circuit += cirq.CZ(src, dest)
return circuit
generate_random_qnn(cirq.GridQubit.rect(1, 3), sympy.Symbol('theta'), 2)
def process_batch(circuits, symbol, op):
Compute the variance of a batch of expectations w.r.t. op on each circuit that
contains `symbol`. Note that this method sets up a new compute graph every time it is
called so it isn't as performant as possible.
# Setup a simple layer to batch compute the expectation gradients.
expectation = tfq.layers.Expectation()
# Prep the inputs as tensors
circuit_tensor = tfq.convert_to_tensor(circuits)
values_tensor = tf.convert_to_tensor(
np.random.uniform(0, 2 * np.pi, (n_circuits, 1)).astype(np.float32))
# Use TensorFlow GradientTape to track gradients.
with tf.GradientTape() as g:
g.watch(values_tensor)
forward = expectation(circuit_tensor,
operators=op,
symbol_names=[symbol],
symbol_values=values_tensor)
# Return variance of gradients across all circuits.
grads = g.gradient(forward, values_tensor)
grad_var = tf.math.reduce_std(grads, axis=0)
return grad_var.numpy()[0]
n_qubits = [2 * i for i in range(2, 7)
] # Ranges studied in paper are between 2 and 24.
depth = 50 # Ranges studied in paper are between 50 and 500.
n_circuits = 200
theta_var = []
for n in n_qubits:
# Generate the random circuits and observable for the given n.
qubits = cirq.GridQubit.rect(1, n)
symbol = sympy.Symbol('theta')
circuits = [
generate_random_qnn(qubits, symbol, depth) for _ in range(n_circuits)
]
op = cirq.Z(qubits[0]) * cirq.Z(qubits[1])
theta_var.append(process_batch(circuits, symbol, op))
plt.semilogy(n_qubits, theta_var)
plt.title('Gradient Variance in QNNs')
plt.xlabel('n_qubits')
plt.xticks(n_qubits)
plt.ylabel('$\\partial \\theta$ variance')
plt.show()
def generate_identity_qnn(qubits, symbol, block_depth, total_depth):
Generate random QNN's with the same structure from Grant et al.
circuit = cirq.Circuit()
# Generate initial block with symbol.
prep_and_U = generate_random_qnn(qubits, symbol, block_depth)
circuit += prep_and_U
# Generate dagger of initial block without symbol.
U_dagger = (prep_and_U[1:])**-1
circuit += cirq.resolve_parameters(
U_dagger, param_resolver={symbol: np.random.uniform() * 2 * np.pi})
for d in range(total_depth - 1):
# Get a random QNN.
prep_and_U_circuit = generate_random_qnn(
qubits,
np.random.uniform() * 2 * np.pi, block_depth)
# Remove the state-prep component
U_circuit = prep_and_U_circuit[1:]
# Add U
circuit += U_circuit
# Add U^dagger
circuit += U_circuit**-1
return circuit
generate_identity_qnn(cirq.GridQubit.rect(1, 3), sympy.Symbol('theta'), 2, 2)
block_depth = 10
total_depth = 5
heuristic_theta_var = []
for n in n_qubits:
# Generate the identity block circuits and observable for the given n.
qubits = cirq.GridQubit.rect(1, n)
symbol = sympy.Symbol('theta')
circuits = [
generate_identity_qnn(qubits, symbol, block_depth, total_depth)
for _ in range(n_circuits)
]
op = cirq.Z(qubits[0]) * cirq.Z(qubits[1])
heuristic_theta_var.append(process_batch(circuits, symbol, op))
plt.semilogy(n_qubits, theta_var)
plt.semilogy(n_qubits, heuristic_theta_var)
plt.title('Heuristic vs. Random')
plt.xlabel('n_qubits')
plt.xticks(n_qubits)
plt.ylabel('$\\partial \\theta$ variance')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 不毛の高原
Step2: TensorFlow Quantum をインストールします。
Step3: 次に、TensorFlow とモジュールの依存関係をインポートします。
Step5: 1. 概要
Step7: ここでは、1 つのパラメータ $\theta_{1,1}$ の勾配を調査します。$\theta_{1,1}$ が存在する回路にsympy.Symbolを配置します。回路内の他のシンボルの統計は分析しないので、ここでランダムな値に置き換えます。
Step8: 3.1 セットアップして実行する
Step10: このプロットは、量子機械学習の問題では、ランダムな QNN 仮説を単純に推測しても、最良の結果を期待することはできないことを示しています。学習が発生する可能性のある点まで勾配を変化させるには、モデル回路に何らかの構造が存在する必要があります。
Step11: 4.2 比較
|
7,158
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install -U google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
if os.getenv("IS_TESTING_TF"):
TF = os.getenv("IS_TESTING_TF")
else:
TF = "2-1"
if TF[0] == "2":
if DEPLOY_GPU:
DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
if DEPLOY_GPU:
DEPLOY_VERSION = "tf-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf-cpu.{}".format(TF)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_endpoint_client():
client = aip.EndpointServiceClient(client_options=client_options)
return client
def create_prediction_client():
client = aip.PredictionServiceClient(client_options=client_options)
return client
clients = {}
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
for client in clients.items():
print(client)
MODEL_DIR = BUCKET_NAME + "/boston"
model_path_to_deploy = MODEL_DIR
! rm -rf custom
! mkdir custom
! mkdir custom/trainer
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for Boston Housing
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import numpy as np
import argparse
import os
import sys

tfds.disable_progress_bar()

# Command-line contract of the training task; Vertex supplies AIP_MODEL_DIR.
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
                    default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
                    default=0.001, type=float,
                    help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
                    default=20, type=int,
                    help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
                    default=100, type=int,
                    help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
                    help='distributed training strategy')
parser.add_argument('--param-file', dest='param_file',
                    default='/tmp/param.txt', type=str,
                    help='Output file for parameters')
args = parser.parse_args()

print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))

# Select the tf.distribute strategy from --distribute.
# Single Machine, single compute device
if args.distribute == 'single':
    # NOTE(review): tf.test.is_gpu_available() is deprecated in newer TF;
    # confirm the pinned TF version before modernizing.
    if tf.test.is_gpu_available():
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
    strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()

# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
def make_dataset():
    """Load Boston Housing, scale each of the 13 feature columns to [0, 1],
    persist the per-feature max values to ``args.param_file``, and return
    the (train, test) splits.

    Returns:
        ((x_train, y_train), (x_test, y_test)) with features max-normalized.
    """
    # Scaling Boston Housing data features
    def scale(feature):
        max = np.max(feature)
        # BUG FIX: np.float was removed in NumPy 1.24 — use the builtin float.
        feature = (feature / max).astype(float)
        return feature, max

    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data(
        path="boston_housing.npz", test_split=0.2, seed=113
    )
    params = []
    # BUG FIX: iterate over the 13 feature *columns*. The original indexed
    # x_train[i], which scaled only the first 13 sample rows while every
    # other row was left unscaled — the per-feature params comment below
    # shows columns were intended.
    for i in range(13):
        x_train[:, i], max = scale(x_train[:, i])
        x_test[:, i], _ = scale(x_test[:, i])
        params.append(max)
    # store the normalization (max) value for each feature
    with tf.io.gfile.GFile(args.param_file, 'w') as f:
        f.write(str(params))
    return (x_train, y_train), (x_test, y_test)
# Build the Keras model
def build_and_compile_dnn_model():
    """Construct and compile the regression DNN (13 inputs -> 1 linear output)."""
    hidden_one = tf.keras.layers.Dense(128, activation='relu', input_shape=(13,))
    hidden_two = tf.keras.layers.Dense(128, activation='relu')
    output_layer = tf.keras.layers.Dense(1, activation='linear')
    net = tf.keras.Sequential([hidden_one, hidden_two, output_layer])
    optimizer = tf.keras.optimizers.RMSprop(learning_rate=args.lr)
    net.compile(loss='mse', optimizer=optimizer)
    return net
# Replica count drives the effective (global) batch size under distribution.
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
BATCH_SIZE = 16
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
with strategy.scope():
    # Creation of dataset, and model building/compiling need to be within
    # `strategy.scope()`.
    model = build_and_compile_dnn_model()
# Train the model
(x_train, y_train), (x_test, y_test) = make_dataset()
model.fit(x_train, y_train, epochs=args.epochs, batch_size=GLOBAL_BATCH_SIZE)
model.save(args.model_dir)

# Run the training task locally, writing the SavedModel to MODEL_DIR (shell magic).
! python custom/trainer/task.py --epochs=10 --model-dir=$MODEL_DIR
import tensorflow as tf

# Reload the SavedModel written by the local training run.
model = tf.keras.models.load_model(MODEL_DIR)

import numpy as np
from tensorflow.keras.datasets import boston_housing

# Same deterministic split as training (seed=113) so the test set matches.
(_, _), (x_test, y_test) = boston_housing.load_data(
    path="boston_housing.npz", test_split=0.2, seed=113
)
def scale(feature):
    """Normalize *feature* into [0, 1] by its maximum value, as float32."""
    peak = np.max(feature)
    return (feature / peak).astype(np.float32)
# Let's save one data item that has not been scaled
x_test_notscaled = x_test[0:1].copy()
# NOTE(review): this indexes the first 13 *rows* rather than the 13 feature
# columns, and normalizes each by its own max instead of the training-time
# params file — it mirrors the training script; confirm intent before changing.
for _ in range(13):
    x_test[_] = scale(x_test[_])
x_test = x_test.astype(np.float32)
print(x_test.shape, x_test.dtype, y_test.shape)
print("scaled", x_test[0])
print("unscaled", x_test_notscaled)
# Report loss (MSE) of the reloaded model on the held-out test set.
model.evaluate(x_test, y_test)
# Inspect the SavedModel's default serving signature to discover the input
# tensor name the prediction service expects for each instance.
loaded = tf.saved_model.load(model_path_to_deploy)
serving_input = list(
    loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)

# Serve with the pre-built container image selected earlier.
IMAGE_URI = DEPLOY_IMAGE
def upload_model(display_name, image_uri, model_uri, timeout=180):
    """Upload a trained model to Vertex AI and return its resource name.

    Args:
        display_name: Human-readable name for the Model resource.
        image_uri: Serving container image (pre-built TF prediction image).
        model_uri: Cloud Storage path of the SavedModel artifacts.
        timeout: Seconds to wait on the upload long-running operation
            (was hard-coded to 180; now a backward-compatible parameter).

    Returns:
        The fully qualified Vertex resource name of the uploaded Model.
    """
    model = {
        "display_name": display_name,
        "metadata_schema_uri": "",  # no explicit metadata schema
        "artifact_uri": model_uri,
        "container_spec": {
            "image_uri": image_uri,
            "command": [],
            "args": [],
            "env": [{"name": "env_name", "value": "env_value"}],
            "ports": [{"container_port": 8080}],
            "predict_route": "",  # empty -> use the container's defaults
            "health_route": "",
        },
    }
    response = clients["model"].upload_model(parent=PARENT, model=model)
    print("Long running operation:", response.operation.name)
    # Block until the long-running operation completes.
    upload_model_response = response.result(timeout=timeout)
    print("upload_model_response")
    print(" model:", upload_model_response.model)
    return upload_model_response.model
# Upload the locally trained model; keep its Vertex resource name for deployment.
model_to_deploy_id = upload_model(
    "boston-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy
)
def get_model(name):
    """Fetch and print a Vertex Model resource by resource name.

    Returns:
        The Model proto (previously discarded; returning it is
        backward-compatible and lets callers inspect the resource).
    """
    response = clients["model"].get_model(name=name)
    print(response)
    return response
get_model(model_to_deploy_id)

# Endpoint display name, made unique with the session timestamp.
ENDPOINT_NAME = "boston_endpoint-" + TIMESTAMP
def create_endpoint(display_name, timeout=300):
    """Create a Vertex AI Endpoint and return the completed Endpoint resource.

    Args:
        display_name: Human-readable endpoint name.
        timeout: Seconds to wait on the create long-running operation
            (was hard-coded to 300; now a backward-compatible parameter).

    Returns:
        The created Endpoint proto.
    """
    endpoint = {"display_name": display_name}
    response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint)
    print("Long running operation:", response.operation.name)
    result = response.result(timeout=timeout)
    print("result")
    print(" name:", result.name)
    print(" display_name:", result.display_name)
    print(" description:", result.description)
    print(" labels:", result.labels)
    print(" create_time:", result.create_time)
    print(" update_time:", result.update_time)
    return result
result = create_endpoint(ENDPOINT_NAME)

# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)

# Fixed-size deployment: min == max == 1 node, i.e. no autoscaling.
MIN_NODES = 1
MAX_NODES = 1
DEPLOYED_NAME = "boston_deployed-" + TIMESTAMP
def deploy_model(
    model, deployed_model_display_name, endpoint, traffic_split=None
):
    """Deploy a Model resource to an Endpoint and return the deployed-model id.

    Args:
        model: Vertex resource name of the uploaded Model.
        deployed_model_display_name: Display name for this deployment.
        endpoint: Vertex resource name of the target Endpoint.
        traffic_split: Mapping of deployed-model id to traffic percentage;
            defaults to routing 100% of traffic to this deployment.

    BUG FIX: the default was the mutable dict ``{"0": 100}`` shared across
    calls; replaced with a ``None`` sentinel (same effective default).
    """
    if traffic_split is None:
        traffic_split = {"0": 100}
    # Attach accelerators only when a GPU type was configured.
    if DEPLOY_GPU:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_type": DEPLOY_GPU,
            "accelerator_count": DEPLOY_NGPU,
        }
    else:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_count": 0,
        }
    deployed_model = {
        "model": model,
        "display_name": deployed_model_display_name,
        "dedicated_resources": {
            "min_replica_count": MIN_NODES,
            "max_replica_count": MAX_NODES,
            "machine_spec": machine_spec,
        },
        "disable_container_logging": False,
    }
    response = clients["endpoint"].deploy_model(
        endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split
    )
    print("Long running operation:", response.operation.name)
    # No timeout: deploying a model can take several minutes.
    result = response.result()
    print("result")
    deployed_model = result.deployed_model
    print(" deployed_model")
    print(" id:", deployed_model.id)
    print(" model:", deployed_model.model)
    print(" display_name:", deployed_model.display_name)
    print(" create_time:", deployed_model.create_time)
    return deployed_model.id
deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)

# One scaled test instance (13 features) for the online prediction request.
test_item = x_test[0]
test_label = y_test[0]
print(test_item.shape)
def predict_data(data, endpoint, parameters_dict):
    """Send one online prediction request and print the predictions.

    Args:
        data: A single instance (array of 13 scaled feature values).
        endpoint: Vertex resource name of the Endpoint to query.
        parameters_dict: Optional model parameters dict (``None`` is parsed
            to a null ``Value``).

    Returns:
        The list of predictions (previously discarded; returning them is
        backward-compatible).
    """
    parameters = json_format.ParseDict(parameters_dict, Value())
    # The format of each instance should conform to the deployed model's prediction input schema.
    instances_list = [{serving_input: data.tolist()}]
    instances = [json_format.ParseDict(s, Value()) for s in instances_list]
    response = clients["prediction"].predict(
        endpoint=endpoint, instances=instances, parameters=parameters
    )
    print("response")
    print(" deployed_model_id:", response.deployed_model_id)
    predictions = response.predictions
    print("predictions")
    for prediction in predictions:
        print(" prediction:", prediction)
    return predictions
# Online prediction for the single test item; no extra model parameters.
predict_data(test_item, endpoint_id, None)
def undeploy_model(deployed_model_id, endpoint):
    """Remove a deployed model from its endpoint (empty traffic split)."""
    operation = clients["endpoint"].undeploy_model(
        endpoint=endpoint,
        deployed_model_id=deployed_model_id,
        traffic_split={},
    )
    print(operation)
undeploy_model(deployed_model_id, endpoint_id)

# Cleanup flags: set any of these to False to keep the resource.
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True

# NOTE(review): only "model", "endpoint" and "prediction" clients are created
# above; the "dataset"/"pipeline"/"job" lookups below would raise KeyError and
# be swallowed by the try/except — confirm those clients exist in the full
# notebook this chunk was taken from.
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
    if delete_dataset and "dataset_id" in globals():
        clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
    print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
    if delete_pipeline and "pipeline_id" in globals():
        clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
    print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
    if delete_model and "model_to_deploy_id" in globals():
        clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
    print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
    if delete_endpoint and "endpoint_id" in globals():
        clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
    print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
    if delete_batchjob and "batch_job_id" in globals():
        clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
    print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
    if delete_customjob and "job_id" in globals():
        clients["job"].delete_custom_job(name=job_id)
except Exception as e:
    print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
    if delete_hptjob and "hpt_job_id" in globals():
        clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
    print(e)
# Remove the staging bucket and all of its contents (shell magic).
if delete_bucket and "BUCKET_NAME" in globals():
    ! gsutil rm -r $BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Vertex constants
Step12: Hardware Accelerators
Step13: Container (Docker) image
Step14: Machine Type
Step15: Tutorial
Step16: Train a model locally
Step17: Task.py contents
Step18: Train the model
Step19: Load the saved model
Step20: Evaluate the model
Step21: Perform the model evaluation
Step22: Upload the model for serving
Step23: Upload the model
Step24: Get Model resource information
Step25: Deploy the Model resource
Step26: Now get the unique identifier for the Endpoint resource you created.
Step27: Compute instance scaling
Step28: Deploy Model resource to the Endpoint resource
Step29: Make a online prediction request
Step30: Send the prediction request
Step31: Undeploy the Model resource
Step32: Cleaning up
|
7,159
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from datetime import datetime

# Minute-resolution Series; the string 'NaN' is coerced to a float NaN by dtype='f'.
s = pd.Series([0.13, 0.21, 0.15, 'NaN', 0.29, 0.09, 0.24, -10], dtype='f',
              index = [datetime(2015,11,16,15,41,23), datetime(2015,11,16,15,42,22), datetime(2015,11,16,15,43,25), datetime(2015,11,16,15,44,20), datetime(2015,11,16,15,45,22),
                       datetime(2015,11,16,15,46,23), datetime(2015,11,16,15,47,26), datetime(2015,11,16,15,48,21)])
print(s)
# Keep only positive values; NaN comparisons are False, so the NaN drops out too.
s = s[s>0]
print(s)
# Downsample to 5-minute bins, taking the max of each bin.
s.resample('5min').max()

# A Series built from a dict: keys become the index labels.
colours = pd.Series({'Blue': 42, 'Green': 12, 'Yellow': 37})
colours
# Positional, slice and label-based indexing all work on a Series.
print(colours[1])
print(colours[:-1])
print(colours['Blue'])
print(colours[1:]['Green'])

more_colours = pd.Series({'Blue': 16, 'Red': 22,
                          'Purple': 34, 'Green': 25,})
# Arithmetic aligns on index labels; labels present on only one side give NaN.
more_colours + colours
colours.mean(), colours.max()

# Two Series combine into a DataFrame over the union of their indexes.
df = pd.DataFrame({'First': colours, 'Second': more_colours})
print(df)
# Column by index
print(df['First'])
# Column as attribute
print(df.First)
# Row via ix
# NOTE(review): DataFrame.ix was removed in pandas 1.0 — use .loc/.iloc on
# modern pandas.
print(df.ix['Blue'])
# Column-wise aggregations.
df.max()
df.sum()

# Parse the first column as a DatetimeIndex while reading the CSV.
example_csv = pd.read_csv('../resources/B1_mosquito_data.csv',
                          parse_dates=True, index_col=0)
example_csv[0:10]
example_csv.corr()

# Load CIS collocated data and convert it to a DataFrame for pandas analysis.
from cis import read_data_list
aerosol_cci_collocated = read_data_list('col_output.nc', '*')
cis_df = aerosol_cci_collocated.as_data_frame()
cis_df
# Now we can do cool Pandas stuff!
cis_df.ix[cis_df['NUMBER_CONCENTRATION'].argmin()]
# Drop rows with missing values before locating the minimum.
cis_short = cis_df.dropna()
cis_short.ix[cis_short['NUMBER_CONCENTRATION'].argmin()]

%matplotlib inline
# Compare the raw distribution with 10-minute and 120-minute resampled means.
cis_df['NUMBER_CONCENTRATION'].plot(kind='kde', xlim=[0,1000], label='Raw')
cis_df['NUMBER_CONCENTRATION'].resample('10min').mean().plot(kind='kde', label='10min')
ax=cis_df['NUMBER_CONCENTRATION'].resample('120min').mean().plot(kind='kde', label='120min')
ax.legend()

# NOTE(review): pandas.tools.plotting moved to pandas.plotting in pandas >= 0.20.
from pandas.tools.plotting import scatter_matrix
m = scatter_matrix(cis_df, alpha=0.2, figsize=(8, 8), diagonal='kde', edgecolors='none')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Series
Step2: As you can see, it's dealt with our missing value nicely - this is one of the nice things about Pandas.
Step3: Note this also got rid of our NaN (as NaN comparisons are always negative)
Step4: Another way of creating series is using dictionaries
Step5: We can index Series just like numpy arrays, or using the named index
Step6: Or both
Step7: Another nice benefit of the indices is in data allignment. So for example when performing operations on two series, Pandas will line up the indices first
Step8: As you can see, if not both of the indices are present then Pandas will return NaNs.
Step9: DataFrames
Step10: And can be indexed by row, or index via the ix attribute
Step11: We can then apply many of the same numpy functions on this data, on a per column basis
Step12: Reading Excel/CSV files
Step13: Using Pandas with CIS data
Step14: Exercise
Step15: Extras
|
7,160
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
from statsmodels.graphics.tsaplots import plot_predict
from statsmodels.tsa.arima_process import arma_generate_sample
from statsmodels.tsa.arima.model import ARIMA

np.random.seed(12345)
# True ARMA(2, 2) parameters used to simulate the data.
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
# arma_generate_sample expects full lag polynomials: a 1 at lag zero and the
# AR coefficients negated.
arparams = np.r_[1, -arparams]
maparams = np.r_[1, maparams]
nobs = 250
y = arma_generate_sample(arparams, maparams, nobs)
# Attach a monthly DatetimeIndex starting 1980 so forecasts can use dates.
dates = pd.date_range("1980-1-1", freq="M", periods=nobs)
y = pd.Series(y, index=dates)
# Fit ARMA(2, 2) (d = 0) with no trend term.
arma_mod = ARIMA(y, order=(2, 0, 2), trend="n")
arma_res = arma_mod.fit()
print(arma_res.summary())
y.tail()

import matplotlib.pyplot as plt

# Plot the in-sample fit plus an out-of-sample forecast over the date span.
fig, ax = plt.subplots(figsize=(10, 8))
fig = plot_predict(arma_res, start="1999-06-30", end="2001-05-31", ax=ax)
legend = ax.legend(loc="upper left")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate some data from an ARMA process
Step2: The conventions of the arma_generate function require that we specify a 1 for the zero-lag of the AR and MA parameters and that the AR parameters be negated.
Step3: Now, optionally, we can add some dates information. For this example, we'll use a pandas time series.
|
7,161
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from pandas import date_range
import bqplot.pyplot as plt
from bqplot import *

# Two synthetic price series: random walks starting near 100.
security_1 = np.cumsum(np.random.randn(150)) + 100.
security_2 = np.cumsum(np.random.randn(150)) + 100.

fig = plt.figure(title='Security 1')
axes_options = {'x': {'label': 'Index'}, 'y': {'label': 'Price'}}
# x values default to range of values when not specified
line = plt.plot(security_1, axes_options=axes_options)
fig

# Restyle the existing mark in place; the displayed figure updates live.
line.colors = ['DarkOrange']
# The opacity allows us to display the Line while featuring other Marks that may be on the Figure
line.opacities = [.5]
line.stroke_width = 2.5
line.line_style = 'dashed'
line.interpolation = 'basis'
line.marker = 'triangle-down'

# Here we define the dates we would like to use
dates = date_range(start='01-01-2007', periods=150)
fig = plt.figure(title='Time Series')
axes_options = {'x': {'label': 'Date'}, 'y': {'label': 'Security 1'}}
time_series = plt.plot(dates, security_1,
                       axes_options=axes_options)
fig

dates_new = date_range(start='06-01-2007', periods=150)
# Plot both securities on one figure with a legend.
fig = plt.figure()
axes_options = {'x': {'label': 'Date'}, 'y': {'label': 'Price'}}
line = plt.plot(dates, [security_1, security_2],
                labels=['Security 1', 'Security 2'],
                axes_options=axes_options,
                display_legend=True)
fig
# Each plotted series can get its own x values.
line.x, line.y = [dates, dates_new], [security_1, security_2]

fig = plt.figure()
axes_options = {'x': {'label': 'Date'},
                'y': {'label': 'Security 1'},
                'color' : {'visible': False}}
# add a custom color scale to color the lines
plt.scales(scales={'color': ColorScale(colors=['Red', 'Green'])})
dates_color = date_range(start='06-01-2007', periods=150)
securities = 100. + np.cumsum(np.random.randn(150, 10), axis=0)
# we generate 10 random price series and 10 random positions
positions = np.random.randint(0, 2, size=10)
# We pass the color scale and the color data to the plot method
line = plt.plot(dates_color, securities.T, color=positions,
                axes_options=axes_options)
fig
# Resetting color to None restores the default per-line colors.
line.color = None

# Patches: filled (optionally closed) paths; data changes animate over 1s.
fig = plt.figure(animation_duration=1000)
patch = plt.plot([],[],
                 fill_colors=['orange', 'blue', 'red'],
                 fill='inside',
                 axes_options={'x': {'visible': False}, 'y': {'visible': False}},
                 stroke_width=10,
                 close_path=True,
                 display_legend=True)
# NOTE(review): the trailing comma makes this a 1-tuple wrapping the list of
# x-paths — likely unintended; confirm against the y assignment below.
patch.x = [[0, 2, 1.2], [0.5, 2.5, 1.7], [4, 5, 6, 6, 5, 4, 3]],
patch.y = [[0, 0, 1], [0.5, 0.5, -0.5], [1, 1.1, 1.2, 2.3, 2.2, 2.7, 1.0]]
fig

patch.fill = 'top'
patch.fill = 'bottom'
patch.opacities = [0.1, 0.2]
patch.x = [[2, 3, 3.2], [0.5, 2.5, 1.7], [4,5,6, 6, 5, 4, 3]]
patch.close_path = False
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic Line Chart
Step2: We can explore the different attributes by changing each of them for the plot above
Step3: In a similar way, we can also change any attribute after the plot has been displayed to change the plot. Run each of the cells below, and try changing the attributes to explore the different features and how they affect the plot.
Step4: While a Lines plot allows the user to extract the general shape of the data being plotted, there may be a need to visualize discrete data points along with this shape. This is where the markers attribute comes in.
Step5: The marker attributes accepts the values square, circle, cross, diamond, square, triangle-down, triangle-up, arrow, rectangle, ellipse. Try changing the string above and re-running the cell to see how each marker type looks.
Step6: Plotting multiples sets of data
Step7: We pass each data set as an element of a list
Step8: Similarly, we can also pass multiple x-values for multiple sets of y-values
Step9: Coloring Lines according to data
Step10: We can also reset the colors of the Line to their defaults by setting the color attribute to None.
Step11: Patches
|
7,162
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
def blight_model():
    """Placeholder for the blight-compliance model; returns no prediction yet."""
    return None
# Load the Detroit blight-ticket training/test data (Latin-1 encoded CSVs).
df_train = pd.read_csv('train.csv', encoding = "ISO-8859-1")
df_test = pd.read_csv('test.csv', encoding = "ISO-8859-1")
df_train.columns
# Columns only known after the hearing (train-only leakage) ...
list_to_remove = ['balance_due',
 'collection_status',
 'compliance_detail',
 'payment_amount',
 'payment_date',
 'payment_status']
# ... and free-text / identifier columns dropped from both splits.
list_to_remove_all = ['violator_name', 'zip_code', 'country', 'city',
 'inspector_name', 'violation_street_number', 'violation_street_name',
 'violation_zip_code', 'violation_description',
 'mailing_address_str_number', 'mailing_address_str_name',
 'non_us_str_code',
 'ticket_issued_date', 'hearing_date']
df_train.drop(list_to_remove, axis=1, inplace=True)
df_train.drop(list_to_remove_all, axis=1, inplace=True)
df_test.drop(list_to_remove_all, axis=1, inplace=True)
df_train.drop('grafitti_status', axis=1, inplace=True)
df_test.drop('grafitti_status', axis=1, inplace=True)
df_train.head()
df_train.violation_code.unique().size
df_train.disposition.unique().size
# Join ticket_id -> address -> (lat, lon) so each ticket gets coordinates.
df_latlons = pd.read_csv('latlons.csv')
df_latlons.head()
df_address = pd.read_csv('addresses.csv')
df_address.head()
df_id_latlons = df_address.set_index('address').join(df_latlons.set_index('address'))
df_id_latlons.head()
df_train = df_train.set_index('ticket_id').join(df_id_latlons.set_index('ticket_id'))
df_test = df_test.set_index('ticket_id').join(df_id_latlons.set_index('ticket_id'))
df_train.head()
df_train.agency_name.value_counts()
# df_train.country.value_counts()
# so we remove zip code and country as well
# Encode only the 10 most frequent violation codes; everything else -> -1.
vio_code_freq10 = df_train.violation_code.value_counts().index[0:10]
vio_code_freq10
df_train['violation_code_freq10'] = [list(vio_code_freq10).index(c) if c in vio_code_freq10 else -1 for c in df_train.violation_code ]
df_train.head()
df_train.violation_code_freq10.value_counts()
# drop violation code
df_train.drop('violation_code', axis=1, inplace=True)
df_test['violation_code_freq10'] = [list(vio_code_freq10).index(c) if c in vio_code_freq10 else -1 for c in df_test.violation_code ]
df_test.drop('violation_code', axis=1, inplace=True)
#df_train.grafitti_status.fillna('None', inplace=True)
#df_test.grafitti_status.fillna('None', inplace=True)
# Drop unlabeled training rows (compliance NaN = responsibility not decided).
df_train = df_train[df_train.compliance.isnull() == False]
df_train.isnull().sum()
df_test.isnull().sum()
# Forward-fill the few remaining missing coordinates/states.
df_train.lat.fillna(method='pad', inplace=True)
df_train.lon.fillna(method='pad', inplace=True)
df_train.state.fillna(method='pad', inplace=True)
df_test.lat.fillna(method='pad', inplace=True)
df_test.lon.fillna(method='pad', inplace=True)
df_test.state.fillna(method='pad', inplace=True)
df_train.isnull().sum().sum()
df_test.isnull().sum().sum()
df_train.head()
one_hot_encode_columns = ['agency_name', 'state', 'disposition']
[ df_train[c].unique().size for c in one_hot_encode_columns]
# So remove city and states...
# One-hot encode the low-cardinality categoricals.
one_hot_encode_columns = ['agency_name', 'state', 'disposition']
df_train = pd.get_dummies(df_train, columns=one_hot_encode_columns)
df_test = pd.get_dummies(df_test, columns=one_hot_encode_columns)
df_train.head()
from sklearn.model_selection import train_test_split
train_features = df_train.columns.drop('compliance')
train_features
# Hold out 5% (X_keep/y_keep) untouched, then split the rest 80/20 train/test.
X_data, X_keep, y_data, y_keep = train_test_split(df_train[train_features],
                                                  df_train.compliance,
                                                  random_state=0,
                                                  test_size=0.05)
print(X_data.shape, X_keep.shape)
X_train, X_test, y_train, y_test = train_test_split(X_data[train_features],
                                                    y_data,
                                                    random_state=0,
                                                    test_size=0.2)
print(X_train.shape, X_test.shape)
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
# Scale features to [0, 1]; fit on train only to avoid leakage.
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
clf = MLPClassifier(hidden_layer_sizes = [50], alpha = 5,
                    random_state = 0,
                    solver='lbfgs')
clf.fit(X_train_scaled, y_train)
print(clf.loss_)
clf.score(X_train_scaled, y_train)
clf.score(X_test_scaled, y_test)
from sklearn.metrics import recall_score, precision_score, f1_score
train_pred = clf.predict(X_train_scaled)
print(precision_score(y_train, train_pred),
      recall_score(y_train, train_pred),
      f1_score(y_train, train_pred))
from sklearn.metrics import recall_score, precision_score, f1_score
test_pred = clf.predict(X_test_scaled)
print(precision_score(y_test, test_pred),
      recall_score(y_test, test_pred),
      f1_score(y_test, test_pred))
# Class probabilities for threshold analysis (ROC / PR curves below).
test_pro = clf.predict_proba(X_test_scaled)
def draw_roc_curve():
    """Plot the ROC curve (with AUC) for the held-out test predictions.

    Reads the notebook globals ``y_test`` and ``test_pro``; the %matplotlib
    line magic inside the body makes this function IPython-only.
    """
    %matplotlib notebook
    import matplotlib.pyplot as plt
    from sklearn.metrics import roc_curve, auc
    # Use the probability of the positive class (column 1).
    fpr_lr, tpr_lr, _ = roc_curve(y_test, test_pro[:,1])
    roc_auc_lr = auc(fpr_lr, tpr_lr)
    plt.figure()
    plt.xlim([-0.01, 1.00])
    plt.ylim([-0.01, 1.01])
    plt.plot(fpr_lr, tpr_lr, lw=3, label='LogRegr ROC curve (area = {:0.2f})'.format(roc_auc_lr))
    plt.xlabel('False Positive Rate', fontsize=16)
    plt.ylabel('True Positive Rate', fontsize=16)
    plt.title('ROC curve (1-of-10 digits classifier)', fontsize=16)
    plt.legend(loc='lower right', fontsize=13)
    # Diagonal = performance of a random classifier.
    plt.plot([0, 1], [0, 1], color='navy', lw=3, linestyle='--')
    plt.axes().set_aspect('equal')
    plt.show()

draw_roc_curve()
# Spot-check a few predicted probabilities vs. hard predictions and labels.
test_pro[0:10]
clf.predict(X_test_scaled[0:10])
y_test[0:10]
# Baseline accuracy of always predicting the majority (non-compliant) class.
1 - y_train.sum()/len(y_train)

from sklearn.metrics import recall_score, precision_score, f1_score
test_pred = clf.predict(X_test_scaled)
print(precision_score(y_test, test_pred),
      recall_score(y_test, test_pred),
      f1_score(y_test, test_pred))
def draw_pr_curve():
    """Plot the precision-recall curve, marking the point nearest threshold 0.5.

    Reads the notebook globals ``y_test`` and ``test_pro``.

    Returns:
        The array of decision thresholds from precision_recall_curve.
    """
    from sklearn.metrics import precision_recall_curve
    from sklearn.metrics import roc_curve, auc
    precision, recall, thresholds = precision_recall_curve(y_test, test_pro[:,1])
    print(len(thresholds))
    # Index of the threshold closest to 0.5 (the default decision boundary).
    idx = min(range(len(thresholds)), key=lambda i: abs(thresholds[i]-0.5))
    print(idx)
    print(np.argmin(np.abs(thresholds)))
    closest_zero = idx # np.argmin(np.abs(thresholds))
    closest_zero_p = precision[closest_zero]
    closest_zero_r = recall[closest_zero]
    import matplotlib.pyplot as plt
    plt.figure()
    plt.xlim([0.0, 1.01])
    plt.ylim([0.0, 1.01])
    plt.plot(precision, recall, label='Precision-Recall Curve')
    # Highlight the ~0.5-threshold operating point.
    plt.plot(closest_zero_p, closest_zero_r, 'o', markersize = 12, fillstyle = 'none', c='r', mew=3)
    plt.xlabel('Precision', fontsize=16)
    plt.ylabel('Recall', fontsize=16)
    plt.axes().set_aspect('equal')
    plt.show()
    return thresholds

thresholds = draw_pr_curve()
import matplotlib.pyplot as plt
%matplotlib notebook
# Visualize how the decision thresholds are distributed along the PR curve.
plt.plot(thresholds)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1:
Step2: Train, keep, test split
Step3: Train a NeuralNet and see the performance
|
7,163
|
<ASSISTANT_TASK:>
Python Code:
# Load the review datasets (text comment + numeric rating).
reviews_test = pd.read_csv('data/reviews_test.csv', header=0, encoding='utf-8')
reviews_train = pd.read_csv('data/reviews_train.csv', header=0, encoding='utf-8')
# NOTE(review): the column is spelled "reting" (sic) in these CSVs — keep the
# misspelling to match the data files.
X_train_raw = reviews_train.comment
y_train_raw = reviews_train.reting
X_test_raw = reviews_test.comment
y_test_raw = reviews_test.reting

# Pre-trained word2vec embedding: 500-dim vectors; every comment is converted
# to a fixed (100 words x 500 dims) matrix.
DIR = 'data/w2v_models/'
MODEL_NAME = 'tenth.norm-sz500-w7-cb0-it5-min5.w2v'
VECTOR_SIZE = 500
SENTENCE_LENGTH = 100 #words
w2v_path = DIR + MODEL_NAME
sentence_processor = SentenceProcessor(w2v_path)
# Words with very high frequency in comments (candidate stop words, disabled):
# garbage_list = ['я', 'большой', 'по', 'купить', 'этот', 'на', 'один', 'так', 'только', 'из', 'хороший', 'как', \
# 'отличный', 'что', 'это', 'и', 'за', 'у', 'в', 'если', 'с', 'очень', 'нет', 'же', 'он', 'при', \
# 'для', 'пользоваться', 'быть', 'а', 'просто', 'раз', 'работать', 'но', 'качество', 'к', 'весь',\
# 'можно', 'есть', 'цена', 'от', 'уже', 'такой', 'она', 'год', 'то']
sentence_processor.stop_list = []

# Convert each comment to an embedding matrix; keep only full-sized samples.
X_train = []
y_train = []
for i in tqdm(range(len(X_train_raw))):
    sent = sentence_processor.process(X_train_raw[i])
    matrix = sentence_processor.convert2matrix(sent, sample_len=SENTENCE_LENGTH)
    if matrix.shape == (SENTENCE_LENGTH, VECTOR_SIZE):
        X_train.append(matrix)
        y_train.append(y_train_raw[i])
X_test = []
y_test = []
for i in tqdm(range(len(X_test_raw))):
    sent = sentence_processor.process(X_test_raw[i])
    matrix = sentence_processor.convert2matrix(sent, sample_len=SENTENCE_LENGTH)
    if matrix.shape == (SENTENCE_LENGTH, VECTOR_SIZE):
        X_test.append(matrix)
        y_test.append(y_test_raw[i])
X_train = np.array(X_train, dtype=np.float32)
X_test = np.array(X_test, dtype=np.float32)
y_train = np.array(y_train, dtype=np.float32)
y_test = np.array(y_test, dtype=np.float32)

# Additional reviews scraped from the internet, converted the same way and
# appended to the training set.
reviews_internet = pd.read_csv('data/internet_reviews.csv', header=0, encoding='utf-8')
X_reviews_internet = reviews_internet.comment
y_reviews_internet = reviews_internet.rating
X_reviews_internet_ = []
y_reviews_internet_ = []
for i in tqdm(range(len(X_reviews_internet))):
    sent = sentence_processor.process(X_reviews_internet[i])
    matrix = sentence_processor.convert2matrix(sent, sample_len=SENTENCE_LENGTH)
    if matrix.shape == (SENTENCE_LENGTH, VECTOR_SIZE):
        X_reviews_internet_.append(matrix)
        y_reviews_internet_.append(y_reviews_internet[i])
X_reviews_internet_ = np.array(X_reviews_internet_, dtype=np.float32)
y_reviews_internet_ = np.array(y_reviews_internet_, dtype=np.float32)
X_train_final = np.concatenate((X_train, X_reviews_internet_), axis=0)
y_train_final = np.concatenate((y_train, y_reviews_internet_), axis=0)
from keras.models import Sequential
import keras
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import LocallyConnected1D, Conv1D, Dropout
from keras.layers import MaxPooling1D, GlobalMaxPooling1D
from keras.layers.recurrent import LSTM
from keras.preprocessing import sequence
from keras.optimizers import Adam, SGD
from keras.models import Model
from keras.layers.merge import concatenate
from keras import regularizers
from keras.layers import Input, Dense

# Multi-branch 1-D CNN over the (100 words x 500 dims) embedding matrix:
# four parallel conv branches (kernel sizes 3/5/7/9, L2-regularized), each
# global-max-pooled, concatenated, then a dense head regressing the rating.
input_1 = Input(shape=(100,500))
conv_1 = Conv1D(filters=256, kernel_size=3, activation='relu', kernel_regularizer=regularizers.l2(0.02))(input_1)
pool_1 = GlobalMaxPooling1D()(conv_1)
conv_2 = Conv1D(filters=256, kernel_size=5, activation='relu', kernel_regularizer=regularizers.l2(0.02))(input_1)
pool_2 = GlobalMaxPooling1D()(conv_2)
conv_3 = Conv1D(filters=512, kernel_size=7, activation='relu', kernel_regularizer=regularizers.l2(0.02))(input_1)
pool_3 = GlobalMaxPooling1D()(conv_3)
conv_4 = Conv1D(filters=512, kernel_size=9, activation='relu', kernel_regularizer=regularizers.l2(0.02))(input_1)
pool_4 = GlobalMaxPooling1D()(conv_4)
concat_1 = concatenate([pool_1, pool_2, pool_3, pool_4], axis=1)
dense_1 = Dense(300, activation='relu')(concat_1)
drop_1 = Dropout(0.5)(dense_1)
# Linear output: the rating is regressed directly (no activation).
dense_4 = Dense(1, activation=None)(drop_1)
model = Model(inputs=input_1, outputs=dense_4)
model.summary()
sgd = SGD(lr=0.00005)
model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['mse'])
model.fit(X_train_final, y_train_final, batch_size=10, epochs=15, validation_data=(X_test, y_test), shuffle=True,
          verbose=True)
model.save('trained_model_2(keras==2.0.8)')
import sklearn
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
def get_score(model, x, y, plot=True, sparse=50, clip_range=(1.0, 5.0)):
    """Evaluate a regression model and optionally plot predictions vs. truth.

    Args:
        model: Fitted model exposing ``predict`` and a ``name`` attribute.
        x: Input samples.
        y: Ground-truth targets.
        plot: Whether to draw the prediction/target comparison plot.
        sparse: Plot every ``sparse``-th point to keep the figure readable.
        clip_range: (low, high) bounds applied to predictions; defaults to
            the 1..5 rating scale (was hard-coded, now a backward-compatible
            parameter).

    Returns:
        Dict with MSE, MAE, median absolute error and R^2 score.
    """
    y_pred = model.predict(x)
    # Ratings are bounded; clamp out-of-range network outputs.
    y_pred = np.clip(y_pred, clip_range[0], clip_range[1])
    mse = mean_squared_error(y, y_pred)
    mae = mean_absolute_error(y, y_pred)
    medae = median_absolute_error(y, y_pred)
    r2 = r2_score(y, y_pred)
    print ('{:.4} \nMSE: {:.4}\nMAE: {:.4}\nMedianAE: {:.4}\nR2 score: {:.4}'.format(model.name, mse, mae, medae, r2))
    if plot:
        plt.figure(figsize=(20,5))
        plt.title(model.name)
        plt.ylabel('Score')
        plt.plot(y_pred[::sparse])
        plt.plot(y[::sparse])
        plt.legend(('y_pred', 'y_test'))
        plt.show()
    return {'mean squared error':mse, 'mean absolute error':mae, 'median absolute error':medae, 'r2 score':r2}
# Final evaluation of the trained CNN on the held-out test set.
get_score(model, X_test, y_test, sparse=50)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Загрузка модели word2vec
Step2: Подготовка данных
Step3: Обучение модели
Step4: Результаты
|
7,164
|
<ASSISTANT_TASK:>
Python Code:
# Share functions used in multiple notebooks
# (defines penalty, penaltyPlot, penaltyFunctions, gradientDescent, ...)
%run Shared-Functions.ipynb
# Load up the packages to investigate the data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
%matplotlib inline
import seaborn as sns
import os
# OS-independent way to navigate the file system
# Data directory is one directory up in relation to directory of this notebook
data_dir_root = os.path.normpath(os.getcwd() + os.sep + os.pardir)
# Where the file is
file_url = data_dir_root + os.sep + "Data" + os.sep + "food-truck-profits.txt"
# Load the data into a dataframe
data = pd.read_csv(file_url, header=None, names=['Population', 'Profit'])
# Quick check on what we have
data.shape
data.head()
# Visualize the data
data.plot.scatter(x='Population', y='Profit', figsize=(8,6));
# Calculate some useful statistics showing how the data is distributed
data.describe()
# Here are the input values
# Number of columns in our dataset
cols = data.shape[1]
# Inputs are in the first column - indexed as 0
X = data.iloc[:, 0:cols-1]
# Alternatively, X = data['Population']
print("Number of columns in the dataset {}".format(cols))
print("First few inputs\n {}".format(X.head()))
# The last few values of X
X.tail()
# Here are the output values
# Outputs are in the second column - indexed as 1
y = data.iloc[:, cols-1:cols]
# Alternatively, y = data['Profits']
# See a sample of the outputs
y.head()
# Last few items of the output
y.tail()
# A Handful of Penalty Functions
# Generate the error range
x = np.linspace(-10,10,100)
# Plot each penalty function from Shared-Functions over the error range
[penaltyPlot(x, pen) for pen in penaltyFunctions.keys()];
penalty(X,y,[-10, 1], VPenalty)
penalty(X,y,[-10, 1], invertedVPenalty)
# Visualize what np.meshgrid does when used with plot
w0 = np.linspace(1,5,5)
w1 = np.linspace(1,5,5)
W0, W1 = np.meshgrid(w0,w1)
plt.plot(W0,W1, marker='*', color='g', linestyle='none');
# Plot the cost surface
# From https://stackoverflow.com/questions/9170838
# See Also: Helpful matplotlib tutorial at
# http://jeffskinnerbox.me/notebooks/matplotlib-2d-and-3d-plotting-in-ipython.html
# Set up a grid over w0,w1 values
w0 = np.linspace(-10,10,50)
w1 = np.linspace(-10,10,50)
W0, W1 = np.meshgrid(w0,w1)
# Get the penalty value for each point on the grid
# See the Shared-Functions.ipynb notebook for the list of defined penalty functions
# List of penalty functions in dict penaltyFunctions
penalties = np.array([penalty(X,y,[w_0,w_1], squaredPenalty) for w_0,w_1 in zip(np.ravel(W0), np.ravel(W1))])
# Reshape the flat penalty vector back onto the (50, 50) grid for plotting
Z = penalties.reshape(W0.shape)
# Create the plot
from mpl_toolkits.mplot3d import Axes3D
fig, ax = plt.subplots(figsize=(12,8))
ax = fig.add_subplot(1,1,1, projection='3d')
ax.set_title("Cost Surface for a " + penaltyFunctions[squaredPenalty] + " Function")
ax.set_xlabel('w0')
ax.set_ylabel('w1')
ax.set_zlabel('Cost')
p = ax.plot_surface(W0, W1, Z, rstride=4, cstride=4)
# Contour Lines
fig, ax = plt.subplots(figsize=(12,8))
plt.contour(Z, cmap=cm.RdBu,vmin=abs(Z).min(), vmax=abs(Z).max(), extent=[-10,10,-10,10])
# Heatmap or Colormap
fig, ax = plt.subplots(figsize=(12,8))
p = ax.pcolor(W0, W1, Z, cmap=cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max())
cb = fig.colorbar(p)
# Initialize the parameter values W and pick the penalty function
W_init = [1,-1.0]
penalty_function = squaredPenalty
# Test out the penalty function in the Shared-Functions notebook
penalty(X, y, W_init, penalty_function)
# Test out the gradientDescent function in the Shared-Functions notebook
gradientDescent(X, y, W_init, num_iterations=5)
# Set hyper-parameters
num_iters = 50 # number of iterations
learning_rate = 0.0005 # the learning rate
# Run gradient descent and capture the progression
# of cost values and the ultimate optimal W values
%time W_opt, final_penalty, running_w, running_penalty = gradientDescent(X, y, W_init, num_iters, learning_rate)
# Get the optimal W values and the last few cost values
W_opt, final_penalty, running_w[-5:], running_penalty[-5:]
# How the penalty changes as the number of iterations increase
fig, ax = plt.subplots(figsize=(8,5))
ax.plot(np.arange(num_iters), running_penalty, 'g')
ax.set_xlabel('Number of Iterations')
ax.set_ylabel('Cost')
ax.set_title('Cost vs. Iterations Over the Dataset for a Specific Learning Rate');
np.array(running_w).flatten()
# Separate the per-iteration w0 and w1 trajectories (drop the initial guess)
w0 = np.array([param[0].flatten() for param in running_w][1:]).flatten()
w1 = np.array([param[1] for param in running_w][1:]).flatten()
len(w0), len(w1), len(np.arange(num_iters))
# How the Ws change as the number of iterations increase
fig, (ax1,ax2) = plt.subplots(figsize=(14,6), nrows=1, ncols=2, sharey=False)
ax1.plot(np.arange(num_iters), w0, 'g')
ax1.set_xlabel('Number of Iterations')
ax1.set_ylabel(r'$w_{0}$')
ax1.set_title(r'$w_{0}$ vs. Iterations Over the Dataset')
ax2.plot(np.arange(num_iters), w1, 'y')
ax2.set_xlabel('Number of Iterations')
ax2.set_ylabel(r'$w_{1}$')
ax2.set_title(r'$w_{1}$ vs. Iterations Over the Dataset')
fig, ax = plt.subplots(figsize=(14,6))
ax.plot(np.arange(num_iters), w0, 'g', label="w0")
ax.plot(np.arange(num_iters), w1, 'y', label="w1")
plt.legend()
W_opt[0,0], W_opt[1,0]
# Create 100 equally spaced values going from the minimum value of population
# to the maximum value of the population in the dataset.
x = np.linspace(data.Population.min(), data.Population.max(), 100)
# Fitted line: f = w0 + w1 * x
f = (W_opt[0, 0] * 1) + (W_opt[1, 0] * x)
fig, ax = plt.subplots(figsize=(8,5))
ax.plot(x, f, 'g', label='Prediction')
ax.scatter(data.Population, data.Profit, label='Training Data')
ax.legend(loc='upper left')
ax.set_xlabel('Population')
ax.set_ylabel('Profit')
ax.set_title('Predicted Profit vs. Population Size');
# First 5 population values in the dataset
X[0:5].values.flatten()
# Prediction of profit for the first 5 populations in the dataset
# (populations are in units of 10,000 people, hence the * 10000)
#populations = [5, 6, 12, 14, 15]
populations = X[0:5].values.flatten()
profits = [W_opt[0, 0] + (W_opt[1, 0] * pop * 10000) for pop in populations]
#print(profits)
print(['${:5,.0f}'.format(profit) for profit in profits])
# How predictions change as the learning rate and the
# number of iterations are changed
learning_rates = [0.001, 0.009]
epochs = [10, 500] # epoch is another way of saying num_iters
# All combinations of learning rates and epochs
from itertools import permutations
combos = [list(zip(epochs, p)) for p in permutations(learning_rates)]
combos
# get it into the right format to plug into the gradient descent function
# (flatten the nested [(epochs, lr), ...] pairs into [epochs, lr] lists)
combos_list = []
for i in range(len(combos)):
    for j in range(len(combos[i])):
        combos_list.append([combos[i][j][0], combos[i][j][1]])
combos_list
# One gradient descent run per (epochs, learning_rate) combination
gdResults = [gradientDescent(X, y, \
    W_init, combos_list[i][0], combos_list[i][1]) for i in range(len(combos_list))]
W_values = [gdResults[i][0] for i in range(len(gdResults))]
len(gdResults), len(W_values)
# Test it out
# From https://stackoverflow.com/questions/31883097/
cmap = plt.get_cmap('jet')
plot_colors = cmap(np.linspace(0, 1, len(combos_list)))
for i, (combo, color) in enumerate(zip(combos_list, plot_colors), 1):
    plt.plot(x, np.sin(x)/i, label=combo, c=color)
# Create 100 equally spaced values going from the minimum value of population
# to the maximum value of the population in the dataset.
x = np.linspace(data.Population.min(), data.Population.max(), 100)
f_list = [(W_values[i][0] * 1) + (W_values[i][1] * x).T for i in range(len(W_values))]
fig, ax = plt.subplots(figsize=(12,8))
#[ax.plot(x, f_list[i], 'r', label=combos_list[i]) for i in range(len(f_list))]
for i, (combo, color) in enumerate(zip(combos_list, plot_colors), 1):
    ax.plot(x, f_list[i-1], label=combo, c=color)
ax.scatter(data.Population, data.Profit, label='Dataset')
ax.legend(loc='upper left')
ax.set_xlabel('Population')
ax.set_ylabel('Profit')
ax.set_title('Predicted Profit as Number of Iterations and Learning Rate Change');
# We're using the optimal W values obtained when the learning rate = 0.001
# and the number of iterations = 500
predictions = [(W_values[3][0] * 1) + (W_values[3][1] * pop) for pop in [50000, 100000, 160000, 180000]]
# Get into the right form for printing
preds = np.array(predictions).squeeze()
print(['${:5,.0f}'.format(pred) for pred in preds])
# We'll use the values of num_iters and learning_rate defined above
print("num_iters: {}".format(num_iters))
print("learning rate: {}".format(learning_rate))
# Vary the size of the dataset
dataset_sizes = [2, 5, 10, 25, 50, len(X)]
# One gradient descent run per training-set prefix size
gdResults = [gradientDescent(X[0:dataset_sizes[i]], y[0:dataset_sizes[i]], \
    W_init, num_iters, learning_rate) for i in range(len(dataset_sizes))]
# This allows us to color our plot lines differently without explicitly specifying colors
cmap2 = plt.get_cmap('jet')
plot_colors2 = cmap2(np.linspace(0, 1, len(dataset_sizes)))
W_values = [gdResults[i][0] for i in range(len(gdResults))]
W_values[0]
# Create 100 equally spaced values going from the minimum value of population
# to the maximum value of the population in the dataset.
x = np.linspace(data.Population.min(), data.Population.max(), 100)
f_list = [(W_values[i][0] * 1) + (W_values[i][1] * x).T for i in range(len(W_values))]
fig, ax = plt.subplots(figsize=(12,8))
#[ax.plot(x, f_list[i], 'y', label=dataset_sizes[i]) for i in range(len(f_list))]
for i, (dataset, color) in enumerate(zip(dataset_sizes, plot_colors2), 1):
    ax.plot(x, f_list[i-1], label=dataset, c=color)
ax.scatter(data.Population, data.Profit, label='Dataset')
ax.legend(loc='upper left')
ax.set_xlabel('Population')
ax.set_ylabel('Profit')
ax.set_title('Predicted Profit as Dataset Size Changes')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ACKNOWLEDGEMENT
Step2: This means that the dataset has 97 rows and 2 columns. Let's see what the data looks like.
Step3: Step 1
Step4: The complete data set can be described using the traditional statistical descriptors
Step5: Exercise 1
Step 2
Step6: Step 2b
Step7: Once we've identified the inputs and the output, the task is easy to define
Step8: Exercise 3
Step9: SIDEBAR - How the Penalty is Usually Written
Step10: Step 5
Step11: Run the iterative gradient descent method to determine the optimal parameter values.
Step12: We can see that the Ws are changing even after 5000 interations...but at the 4th decimal place. Similarly, the penalty is changing (decreasing) in the 100s place.
Step13: Exercise 4
Step 6
Step14: Experimenting with Hyperparameters
Step15: Exercise 5
Step16: Learning from Experience
|
7,165
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
!pip install tensorflow==1.15.3
import numpy as np
import seaborn as sns
import pandas as pd
import tensorflow as tf
SEQ_LEN = 10
def create_time_series(seq_len=None):
    """Generate one synthetic sine-wave sequence with random frequency/amplitude.

    Args:
        seq_len: length of the sequence; defaults to the module-level SEQ_LEN
            (kept as a runtime lookup so existing zero-argument calls behave
            exactly as before).

    Returns:
        1-D numpy array of `seq_len` samples of ampl * sin(t * freq), with
        freq drawn uniformly from [0.1, 0.6) and ampl from [0.5, 1.5).
    """
    if seq_len is None:
        seq_len = SEQ_LEN
    freq = (np.random.random() * 0.5) + 0.1  # 0.1 to 0.6
    ampl = np.random.random() + 0.5          # 0.5 to 1.5
    x = np.sin(np.arange(0, seq_len) * freq) * ampl
    return x
for i in range(0, 5):
sns.distplot( create_time_series() ); # 5 series
def to_csv(filename, N):
    """Write N synthetic sine-wave sequences to `filename`, one
    comma-separated sequence per line (no header)."""
    with open(filename, 'w') as out:
        for _ in range(0, N):
            sequence = create_time_series()
            out.write(",".join(map(str, sequence)) + '\n')
to_csv('train.csv', 1000) # 1000 sequences
to_csv('valid.csv', 50)
!head -5 train.csv valid.csv
import shutil
import tensorflow.contrib.metrics as metrics
import tensorflow.contrib.rnn as rnn
DEFAULTS = [[0.0] for x in range(0, SEQ_LEN)]
BATCH_SIZE = 20
TIMESERIES_COL = 'rawdata'
# In each sequence, column index 0 to N_INPUTS - 1 are features, and column index N_INPUTS to SEQ_LEN are labels
N_OUTPUTS = 1
N_INPUTS = SEQ_LEN - N_OUTPUTS
# Read data and convert to needed format
def read_dataset(filename, mode, batch_size = 512):
    """Return a TF1 Estimator input_fn over CSV files matching `filename`.

    Each CSV row holds SEQ_LEN floats; the first N_INPUTS become the feature
    sequence (under key TIMESERIES_COL) and the last N_OUTPUTS the label.
    TRAIN mode shuffles and repeats indefinitely; other modes run one epoch.
    """
    def _input_fn():
        # Provide the ability to decode a CSV
        def decode_csv(line):
            # all_data is a list of scalar tensors
            all_data = tf.decode_csv(line, record_defaults = DEFAULTS)
            inputs = all_data[:len(all_data) - N_OUTPUTS]  # first N_INPUTS values
            labels = all_data[len(all_data) - N_OUTPUTS:]  # last N_OUTPUTS values
            # Convert each list of rank R tensors to one rank R+1 tensor
            inputs = tf.stack(inputs, axis = 0)
            labels = tf.stack(labels, axis = 0)
            # Convert input R+1 tensor into a feature dictionary of one R+1 tensor
            features = {TIMESERIES_COL: inputs}
            return features, labels
        # Create list of files that match pattern
        file_list = tf.gfile.Glob(filename)
        # Create dataset from file list
        dataset = tf.data.TextLineDataset(file_list).map(decode_csv)
        if mode == tf.estimator.ModeKeys.TRAIN:
            num_epochs = None  # indefinitely
            dataset = dataset.shuffle(buffer_size = 10 * batch_size)
        else:
            num_epochs = 1  # end-of-input after this
        dataset = dataset.repeat(num_epochs).batch(batch_size)
        iterator = dataset.make_one_shot_iterator()
        batch_features, batch_labels = iterator.get_next()
        return batch_features, batch_labels
    return _input_fn
LSTM_SIZE = 3  # dimensionality of the LSTM hidden state
# Create the inference model
def simple_rnn(features, labels, mode):
    """TF1 Estimator model_fn: one static LSTM over the window + linear head.

    features[TIMESERIES_COL] is a batch of N_INPUTS-long sequences; the last
    LSTM output is linearly projected to N_OUTPUTS prediction(s).
    """
    # 0. Reformat input shape to become a sequence
    x = tf.split(features[TIMESERIES_COL], N_INPUTS, 1)
    # 1. Configure the RNN
    lstm_cell = rnn.BasicLSTMCell(LSTM_SIZE, forget_bias = 1.0)
    outputs, _ = rnn.static_rnn(lstm_cell, x, dtype = tf.float32)
    # Slice to keep only the last cell of the RNN
    outputs = outputs[-1]
    # Output is result of linear activation of last layer of RNN
    weight = tf.get_variable("weight", initializer=tf.initializers.random_normal, shape=[LSTM_SIZE, N_OUTPUTS])
    bias = tf.get_variable("bias", initializer=tf.initializers.random_normal, shape=[N_OUTPUTS])
    predictions = tf.matmul(outputs, weight) + bias
    # 2. Loss function, training/eval ops (only built when labels exist)
    if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
        loss = tf.losses.mean_squared_error(labels, predictions)
        train_op = tf.contrib.layers.optimize_loss(
            loss = loss,
            global_step = tf.train.get_global_step(),
            learning_rate = 0.01,
            optimizer = "SGD")
        eval_metric_ops = {
            "rmse": tf.metrics.root_mean_squared_error(labels, predictions)
        }
    else:
        # PREDICT mode: no loss or optimizer
        loss = None
        train_op = None
        eval_metric_ops = None
    # 3. Create predictions
    predictions_dict = {"predicted": predictions}
    # 4. Create export outputs
    export_outputs = {"predict_export_outputs": tf.estimator.export.PredictOutput(outputs = predictions)}
    # 5. Return EstimatorSpec
    return tf.estimator.EstimatorSpec(
        mode = mode,
        predictions = predictions_dict,
        loss = loss,
        train_op = train_op,
        eval_metric_ops = eval_metric_ops,
        export_outputs = export_outputs)
# Create functions to read in respective datasets
# read_dataset returns an input_fn closure; these hand that callable
# to the Estimator's Train/Eval specs.
def get_train():
    return read_dataset(filename = 'train.csv', mode = tf.estimator.ModeKeys.TRAIN, batch_size = 512)
def get_valid():
    return read_dataset(filename = 'valid.csv', mode = tf.estimator.ModeKeys.EVAL, batch_size = 512)
# Create serving input function
def serving_input_fn():
    """Serving-time input_fn accepting [None, N_INPUTS] float batches.

    The expand_dims followed by squeeze on axis 2 keeps the served feature
    tensor's rank consistent with the training-time features.
    """
    feature_placeholders = {
        TIMESERIES_COL: tf.placeholder(tf.float32, [None, N_INPUTS])
    }
    features = {
        key: tf.expand_dims(tensor, -1)
        for key, tensor in feature_placeholders.items()
    }
    features[TIMESERIES_COL] = tf.squeeze(features[TIMESERIES_COL], axis = [2])
    return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# Create custom estimator's train and evaluate function
def train_and_evaluate(output_dir):
    """Train the simple_rnn estimator for up to 1000 steps, evaluating on the
    validation set and exporting the latest SavedModel to `output_dir`."""
    estimator = tf.estimator.Estimator(model_fn = simple_rnn,
                                       model_dir = output_dir)
    train_spec = tf.estimator.TrainSpec(input_fn = get_train(),
                                        max_steps = 1000)
    exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
    eval_spec = tf.estimator.EvalSpec(input_fn = get_valid(),
                                      steps = None,
                                      exporters = exporter)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# Run the model
shutil.rmtree('outputdir', ignore_errors = True) # start fresh each time
train_and_evaluate('outputdir')
%%bash
# Run module as-is
echo $PWD
rm -rf outputdir
export PYTHONPATH=${PYTHONPATH}:${PWD}/simplernn
python -m trainer.task \
--train_data_paths="${PWD}/train.csv*" \
--eval_data_paths="${PWD}/valid.csv*" \
--output_dir=outputdir \
--job-dir=./tmp
import tensorflow as tf
import numpy as np
def breakup(sess, x, lookback_len):
    """Split 1-D tensor `x` into consecutive sliding windows of length
    `lookback_len`, stacked into a 2-D tensor.

    NOTE(review): range(0, N - lookback_len) yields N - lookback_len windows,
    so the final window ending at the last element is never produced —
    possibly an off-by-one (N - lookback_len + 1 would include it). Confirm
    intent against the demo output below.
    """
    N = sess.run(tf.size(x))
    windows = [tf.slice(x, [b], [lookback_len]) for b in range(0, N-lookback_len)]
    windows = tf.stack(windows)
    return windows
x = tf.constant(np.arange(1,11, dtype=np.float32))
with tf.Session() as sess:
print('input=', x.eval())
seqx = breakup(sess, x, 5)
print('output=', seqx.eval())
def make_keras_estimator(output_dir):
    """Keras variant: a small dense network converted to a TF Estimator.

    NOTE(review): TIMESERIES_INPUT_LAYER is not defined in this notebook
    (the RNN path uses TIMESERIES_COL) — verify the layer name matches the
    input_fn feature key before use. `output_dir` is accepted but unused.
    """
    from tensorflow import keras
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(32, input_shape=(N_INPUTS,), name=TIMESERIES_INPUT_LAYER))
    model.add(keras.layers.Activation('relu'))
    model.add(keras.layers.Dense(1))
    model.compile(loss = 'mean_squared_error',
                  optimizer = 'adam',
                  metrics = ['mae', 'mape'])  # mean absolute [percentage] error
    return keras.estimator.model_to_estimator(model)
%%bash
# Run module as-is
echo $PWD
rm -rf outputdir
export PYTHONPATH=${PYTHONPATH}:${PWD}/simplernn
python -m trainer.task \
--train_data_paths="${PWD}/train.csv*" \
--eval_data_paths="${PWD}/valid.csv*" \
--output_dir=${PWD}/outputdir \
--job-dir=./tmp --keras
%%bash
gcloud ai-platform "TODO: Insert code here with all the neeed parameters"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2> RNN </h2>
Step2: <h3> Input Fn to read CSV </h3>
Step3: Reading data using the Estimator API in tf.estimator requires an input_fn. This input_fn needs to return a dict of features and the corresponding labels.
Step4: <h3> Define RNN </h3>
Step5: <h3> Estimator </h3>
Step6: <h3> Standalone Python module </h3>
Step7: <h2> Variant
Step8: Variant
Step9: Challenge Excercise
|
7,166
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import tensorflow as tf
import tflearn
from tflearn.data_utils import to_categorical
reviews = pd.read_csv('reviews.txt', header=None)
labels = pd.read_csv('labels.txt', header=None)
reviews.head()
labels.head()
from collections import Counter
total_counts = Counter()
for _, review in reviews.iterrows():
for word in review[0].split(' '):
total_counts[word] += 1
print("Total words in data set: ", len(total_counts))
vocab = sorted(total_counts, key=total_counts.get, reverse=True)[:10000]
print(vocab[:60])
print(vocab[-1], ': ', total_counts[vocab[-1]])
word2idx = {word: idx for idx, word in enumerate(vocab)}
word2idx['the']
def text_to_vector(text):
    """Bag-of-words count vector for `text` over the module-level vocabulary.

    Words absent from `word2idx` are ignored; returns an integer array of
    length len(vocab) with per-word occurrence counts.
    """
    counts = np.zeros(len(vocab), dtype=np.int_)
    for token in text.split(' '):
        position = word2idx.get(token)
        if position is not None:
            counts[position] += 1
    return counts
text_to_vector('The tea is for a party to celebrate '
'the movie so she has no time for a cake')[:65]
word_vectors = np.zeros((len(reviews), len(vocab)), dtype=np.int_)
for ii, (_, text) in enumerate(reviews.iterrows()):
word_vectors[ii] = text_to_vector(text[0])
# Printing out the first 5 word vectors
word_vectors[:5, :23]
Y = (labels=='positive').astype(np.int_)
records = len(labels)
shuffle = np.arange(records)
np.random.shuffle(shuffle)
test_fraction = 0.9
train_split, test_split = shuffle[:int(records*test_fraction)], shuffle[int(records*test_fraction):]
trainX, trainY = word_vectors[train_split,:], to_categorical(Y.values[train_split], 2)
testX, testY = word_vectors[test_split,:], to_categorical(Y.values[test_split], 2)
trainY
# Network building
def build_model():
    """Build the TFLearn sentiment classifier: bag-of-words input ->
    200 -> 25 ReLU hidden layers -> 2-way softmax, trained with SGD."""
    # This resets all parameters and variables, leave this here
    tf.reset_default_graph()
    net = tflearn.input_data([None, len(vocab)])
    net = tflearn.fully_connected(net, n_units=200, activation='ReLU')
    net = tflearn.fully_connected(net, n_units=25, activation='ReLU')
    net = tflearn.fully_connected(net, n_units=2, activation='softmax')
    net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')
    model = tflearn.DNN(net)
    return model
model = build_model()
# Training
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128, n_epoch=10)
predictions = (np.array(model.predict(testX))[:,0] >= 0.5).astype(np.int_)
test_accuracy = np.mean(predictions == testY[:,0], axis=0)
print("Test accuracy: ", test_accuracy)
# Helper function that uses your model to predict sentiment
def test_sentence(sentence):
    """Print the model's positive-class probability for `sentence` and the
    resulting Positive/Negative call (threshold 0.5). Uses the module-level
    trained `model` and `text_to_vector`."""
    positive_prob = model.predict([text_to_vector(sentence.lower())])[0][1]
    print('Sentence: {}'.format(sentence))
    print('P(positive) = {:.3f} :'.format(positive_prob),
          'Positive' if positive_prob > 0.5 else 'Negative')
sentence = "Moonlight is by far the best movie of 2016."
test_sentence(sentence)
sentence = "It's amazing anyone could be talented enough to make something this spectacularly awful"
test_sentence(sentence)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preparing the data
Step2: Counting word frequency
Step3: Let's keep the first 10000 most frequent words. As Andrew noted, most of the words in the vocabulary are rarely used so they will have little effect on our predictions. Below, we'll sort vocab by the count value and keep the 10000 most frequent words.
Step4: What's the last word in our vocabulary? We can use this to judge if 10000 is too few. If the last word is pretty common, we probably need to keep more words.
Step5: The last word in our vocabulary shows up in 30 reviews out of 25000. I think it's fair to say this is a tiny proportion of reviews. We are probably fine with this number of words.
Step6: Text to vector function
Step7: If you do this right, the following code should return
Step8: Now, run through our entire review data set and convert each review to a word vector.
Step9: Train, Validation, Test sets
Step10: Building the network
Step11: Intializing the model
Step12: Training the network
Step13: Testing
Step14: Try out your own text!
|
7,167
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import tensorflow as tf
%matplotlib notebook
import matplotlib
import matplotlib.pyplot as plt
import codecs
import os
import collections
from six.moves import cPickle
from six import text_type
import time
from __future__ import print_function
class Args():
    """All hyper-parameters and paths for the char-RNN training/sampling
    notebook, bundled in one object (stands in for argparse)."""
    def __init__(self):
        # data directory containing input.txt
        self.data_dir = 'data_rnn/tinyshakespeare'
        # directory to store checkpointed models
        self.save_dir = 'save_vec'
        # size of RNN hidden state
        self.rnn_size = 128
        # minibatch size
        self.batch_size = 1  # was 40
        # RNN sequence length
        self.seq_length = 50
        # number of epochs
        self.num_epochs = 1  # was 5
        # save frequency
        self.save_every = 500  # was 500
        # print frequency
        self.print_every = 100  # was 100
        # clip gradients at this value
        self.grad_clip = 5.
        # learning rate
        self.learning_rate = 0.002  # was ?
        # decay rate for rmsprop
        self.decay_rate = 0.98  # was 0.97?
        # Continue training from the saved model at this path. The path must
        # contain files saved by a previous training run:
        #   'config.pkl'      : configuration
        #   'chars_vocab.pkl' : vocabulary definitions
        #   'checkpoint'      : paths to model file(s) (created by tf; contains
        #                       absolute paths — careful when moving files around)
        #   'model.ckpt-*'    : file(s) with model definition (created by tf)
        # FIX: this text appeared as a bare (unquoted) block in the original
        # cell, which is a syntax error; restored as a comment.
        self.init_from = 'save_vec'
        #self.init_from = None
        # number of characters to sample
        self.n = 500
        # prime text
        self.prime = u' '
class TextLoader():
    """Character-level corpus loader.

    Reads ``input.txt`` from *data_dir* (caching a vocabulary pickle and an
    encoded numpy tensor on first use), then serves minibatches of one-hot
    encoded input/target sequences where the target is the input shifted by
    one character (wrapping around at the end of the corpus).
    """
    def __init__(self, data_dir, batch_size, seq_length, encoding='utf-8'):
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.encoding = encoding
        input_file = os.path.join(data_dir, "input.txt")
        vocab_file = os.path.join(data_dir, "vocab.pkl")
        tensor_file = os.path.join(data_dir, "data.npy")
        # Preprocess once; later runs reuse the cached pickle/npy files.
        if not (os.path.exists(vocab_file) and os.path.exists(tensor_file)):
            print("reading text file")
            self.preprocess(input_file, vocab_file, tensor_file)
        else:
            print("loading preprocessed files")
            self.load_preprocessed(vocab_file, tensor_file)
        self.create_batches()
        self.reset_batch_pointer()

    def preprocess(self, input_file, vocab_file, tensor_file):
        """Build the char vocabulary (most frequent first), encode the corpus
        as integer ids, and cache both to disk."""
        with codecs.open(input_file, "r", encoding=self.encoding) as f:
            data = f.read()
        counter = collections.Counter(data)
        count_pairs = sorted(counter.items(), key=lambda x: -x[1])
        self.chars, _ = zip(*count_pairs)
        self.vocab_size = len(self.chars)
        self.vocab = dict(zip(self.chars, range(len(self.chars))))
        with open(vocab_file, 'wb') as f:
            cPickle.dump(self.chars, f)
        self.tensor = np.array(list(map(self.vocab.get, data)))
        np.save(tensor_file, self.tensor)

    def load_preprocessed(self, vocab_file, tensor_file):
        """Restore vocabulary and encoded corpus from the cache files."""
        with open(vocab_file, 'rb') as f:
            self.chars = cPickle.load(f)
        self.vocab_size = len(self.chars)
        self.vocab = dict(zip(self.chars, range(len(self.chars))))
        self.tensor = np.load(tensor_file)
        self.num_batches = int(self.tensor.size / (self.batch_size *
                                                   self.seq_length))

    def create_batches(self):
        """Slice the corpus into x/y batch arrays; y is x shifted left by one."""
        self.num_batches = int(self.tensor.size / (self.batch_size *
                                                   self.seq_length))
        # When the data (tensor) is too small, let's give them a better error message
        if self.num_batches == 0:
            assert False, "Not enough data. Make seq_length and batch_size small."
        self.tensor = self.tensor[:self.num_batches * self.batch_size * self.seq_length]
        xdata = self.tensor
        ydata = np.copy(self.tensor)
        ydata[:-1] = xdata[1:]
        ydata[-1] = xdata[0]  # wrap around: last target is the first char
        self.x_batches = np.split(xdata.reshape(self.batch_size, -1), self.num_batches, 1)
        self.y_batches = np.split(ydata.reshape(self.batch_size, -1), self.num_batches, 1)

    def vectorize(self, x):
        """One-hot encode an integer id matrix into (rows, cols, vocab_size).

        Uses numpy fancy indexing in place of the original O(rows*cols)
        Python double loop; output values and dtype (float64 0.0/1.0) are
        unchanged.
        """
        ids = np.asarray(x)
        vectorized = np.zeros((ids.shape[0], ids.shape[1], self.vocab_size))
        rows, cols = np.indices(ids.shape)
        vectorized[rows, cols, ids] = 1
        return vectorized

    def next_batch(self):
        """Return the next (x, y) pair of one-hot batches and advance the pointer."""
        x, y = self.x_batches[self.pointer], self.y_batches[self.pointer]
        self.pointer += 1
        x_vectorized = self.vectorize(x)
        y_vectorized = self.vectorize(y)
        return x_vectorized, y_vectorized

    def reset_batch_pointer(self):
        # Rewind to the first batch (called once per epoch).
        self.pointer = 0
## First we open the file
args = Args()
input_file = os.path.join(args.data_dir, "input.txt")
f = codecs.open(input_file, "r", 'utf-8')
data = f.read()
print (data[0:300])
counter = collections.Counter(data)
print ('histogram of char from the input data file:', counter)
count_pairs = sorted(counter.items(), key=lambda x: -x[1])
print (count_pairs)
chars, _ = zip(*count_pairs)
print ('chars', chars)
vocab_size = len(chars)
print (vocab_size)
vocab = dict(zip(chars, range(len(chars))))
print (vocab)
print (vocab['a'])
# Karpathy orginal code seems to do the same:
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
vocab = { ch:i for i,ch in enumerate(chars) }
print (vocab)
data_in_array = map(vocab.get, data)
print (len(data_in_array))
print (data_in_array[0:200])
print (data_in_array[0], 'means', data[0],'witch is the first letter in data' )
tensor = np.array(data_in_array)
data_loader = TextLoader(args.data_dir, args.batch_size, args.seq_length)
data_loader.create_batches()
x, y = data_loader.next_batch()
print ('x and y are matrix ', len(x), 'x', len(x[0]) )
print ('there are', len(x), 'batch that contains', len(x[0]), 'vector that have a size of', len(x[0][0]))
print ('x[0] is the first batch of input:')
print (x[0])
print ('x[0][0] is the first char:')
print (x[0][0])
print ('y[0][0] is the first batch of expected char:')
print (y[0][0])
print ('y[0] is x[0] shifted by one, in other words: y[0][x] == x[0][x+1]')
print ('y[0][10] ==', y[0][10])
print ('x[0][11] ==', x[0][11])
class Model():
    """Minimal character-level vanilla RNN built with raw TF1 ops (Karpathy's
    min-char-rnn translated to TensorFlow).

    Builds an unrolled forward pass of `args.seq_length` steps over one-hot
    inputs, a softmax cross-entropy loss, and an Adam training op with
    gradient clipping.
    """
    def __init__(self, args, infer=False):
        self.args = args
        if infer:
            # Infer is true when the model is used for sampling:
            # sample one character at a time.
            args.seq_length = 1
        hidden_size = args.rnn_size
        vocab_size = args.vocab_size
        # define place holders for the input data and the target.
        self.input_data = tf.placeholder(tf.float32, [args.batch_size, args.seq_length, vocab_size], name='input_data')
        self.target_data = tf.placeholder(tf.float32, [args.batch_size, args.seq_length, vocab_size], name='target_data')
        # define the input xs: take the first batch row and split per timestep
        one_batch_input = tf.squeeze(tf.slice(self.input_data, [0, 0, 0], [1, args.seq_length, vocab_size]),[0])
        xs = tf.split(0, args.seq_length, one_batch_input)
        # define the target the same way
        one_batch_target = tf.squeeze(tf.slice(self.target_data, [0, 0, 0], [1, args.seq_length, vocab_size]),[0])
        targets = tf.split(0, args.seq_length, one_batch_target)
        # initial hidden state
        self.initial_state = tf.zeros((hidden_size, 1))
        # model parameters
        Wxh = tf.Variable(tf.random_uniform((hidden_size, vocab_size))*0.01, name='Wxh')   # input to hidden
        Whh = tf.Variable(tf.random_uniform((hidden_size, hidden_size))*0.01, name='Whh')  # hidden to hidden
        Why = tf.Variable(tf.random_uniform((vocab_size, hidden_size))*0.01, name='Why')   # hidden to output
        bh = tf.Variable(tf.zeros((hidden_size, 1)), name='bh')  # hidden bias
        by = tf.Variable(tf.zeros((vocab_size, 1)), name='by')   # output bias
        loss = tf.zeros([1], name='loss')
        hs, ys, ps = {}, {}, {}
        hs[-1] = self.initial_state
        # forward pass, unrolled over the sequence
        for t in xrange(args.seq_length):
            xs_t = tf.transpose(xs[t])
            targets_t = tf.transpose(targets[t])
            hs[t] = tf.tanh(tf.matmul(Wxh, xs_t) + tf.matmul(Whh, hs[t-1]) + bh)  # hidden state
            ys[t] = tf.matmul(Why, hs[t]) + by  # unnormalized log probabilities for next chars
            ps[t] = tf.exp(ys[t]) / tf.reduce_sum(tf.exp(ys[t]))  # probabilities for next chars
            loss += -tf.log(tf.reduce_sum(tf.mul(ps[t], targets_t)))  # softmax (cross-entropy loss)
            # FIX: was commented out, leaving self.probs undefined so sample()
            # crashed with AttributeError. Keeps the last timestep's softmax
            # (with infer=True, seq_length == 1, so this is the next-char dist).
            self.probs = ps[t]
        self.cost = loss / args.batch_size / args.seq_length
        self.final_state = hs[args.seq_length-1]
        self.lr = tf.Variable(0.0, trainable=False, name='learning_rate')
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
                                          args.grad_clip)
        optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_op = optimizer.apply_gradients(zip(grads, tvars))

    def sample(self, sess, chars, vocab, num=200, prime='The '):
        """Sample `num` characters from the trained model, seeded with `prime`.

        `chars` maps id -> char and `vocab` maps char -> id.
        """
        # FIX: was `model.initial_state.eval()`, reading a notebook global
        # instead of this instance.
        state = self.initial_state.eval()
        # Warm up the hidden state on the prime text (all but the last char).
        for char in prime[:-1]:
            # FIX: vocab size was hard-coded as 65 (the Shakespeare vocab);
            # use len(vocab) so the model works with any corpus.
            x = np.zeros((1, 1, len(vocab)))
            x[0, 0, vocab[char]] = 1
            feed = {self.input_data: x, self.initial_state: state}
            [state] = sess.run([self.final_state], feed)

        def weighted_pick(weights):
            # Draw an index proportionally to `weights`.
            t = np.cumsum(weights)
            s = np.sum(weights)
            return(int(np.searchsorted(t, np.random.rand(1)*s)))

        ret = prime
        char = prime[-1]
        for n in range(num):
            x = np.zeros((1, 1, len(vocab)))  # FIX: was hard-coded 65
            x[0, 0, vocab[char]] = 1
            feed = {self.input_data: x, self.initial_state: state}
            [probs, state] = sess.run([self.probs, self.final_state], feed)
            sample = weighted_pick(probs)
            pred = chars[sample]
            ret += pred
            char = pred
        return ret

    def inspect(self, draw=False):
        """Print every graph variable ('t' = trainable, 'nt' = not) and
        optionally render trainable ones as images."""
        for var in tf.all_variables():
            if var in tf.trainable_variables():
                print ('t', var.name, var.eval().shape)
                if draw:
                    plt.figure(figsize=(1,1))
                    plt.figimage(var.eval())
                    plt.show()
            else:
                print ('nt', var.name, var.eval().shape)
tf.reset_default_graph()
args = Args()
data_loader = TextLoader(args.data_dir, args.batch_size, args.seq_length)
args.vocab_size = data_loader.vocab_size
print (args.vocab_size)
model = Model(args)
print ("model created")
# Open a session to inspect the model
with tf.Session() as sess:
tf.initialize_all_variables().run()
print('All variable initialized')
model.inspect()
'''
saver = tf.train.Saver(tf.all_variables())
ckpt = tf.train.get_checkpoint_state(args.save_dir)
print (ckpt)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
model.inspect()
plt.figure(figsize=(1,1))
plt.figimage(model.vectorize.eval())
plt.show()'''
# this code from:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
from IPython.display import clear_output, Image, display, HTML
def strip_consts(graph_def, max_const_size=32):
    """Strip large constant values from graph_def.

    Returns a copy of `graph_def` in which every Const node whose tensor
    content exceeds `max_const_size` bytes has its payload replaced by a
    short placeholder string, keeping the graph small enough to visualize.

    FIX: the summary line above appeared as a bare (unquoted) string in the
    original cell — a syntax error; restored as a proper docstring.
    """
    strip_def = tf.GraphDef()
    for n0 in graph_def.node:
        n = strip_def.node.add()
        n.MergeFrom(n0)
        if n.op == 'Const':
            tensor = n.attr['value'].tensor
            size = len(tensor.tensor_content)
            if size > max_const_size:
                tensor.tensor_content = "<stripped %d bytes>"%size
    return strip_def
def rename_nodes(graph_def, rename_func):
    """Return a copy of `graph_def` with every node name (and every input
    reference, preserving any leading '^' control-dependency marker) mapped
    through `rename_func`."""
    res_def = tf.GraphDef()
    for node in graph_def.node:
        copied = res_def.node.add()
        copied.MergeFrom(node)
        copied.name = rename_func(copied.name)
        for i, inp in enumerate(copied.input):
            copied.input[i] = rename_func(inp) if inp[0] != '^' else '^' + rename_func(inp[1:])
    return res_def
def show_graph(graph_def, max_const_size=32):
    """Visualize TensorFlow graph.

    Embeds a TensorBoard graph widget into the notebook via an iframe.
    Large constants are stripped first so the serialized graph stays small.

    BUG FIX: the two triple-quoted HTML template literals lost their quotes
    in this extract (syntax error), and the HTML entity in
    ``code.replace('"', '&quot;')`` was de-escaped to a bare quote; both
    restored from the original deepdream notebook helper.
    """
    if hasattr(graph_def, 'as_graph_def'):
        graph_def = graph_def.as_graph_def()
    strip_def = strip_consts(graph_def, max_const_size=max_const_size)
    code = """
        <script>
          function load() {{
            document.getElementById("{id}").pbtxt = {data};
          }}
        </script>
        <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
        <div style="height:600px">
          <tf-graph-basic id="{id}"></tf-graph-basic>
        </div>
    """.format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
    iframe = """
        <iframe seamless style="width:800px;height:620px;border:0" srcdoc="{}"></iframe>
    """.format(code.replace('"', '&quot;'))
    display(HTML(iframe))
# Serialize the session graph to a protobuf file, read it back, and shorten
# node names for display (show_graph call left disabled).
# write the graph to help visualizing it
model_fn = 'model.pb'
tf.train.write_graph(sess.graph.as_graph_def(),'.', model_fn, as_text=False)
# Visualizing the network graph. Be sure expand the "mixed" nodes to see their
with tf.gfile.FastGFile(model_fn, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tmp_def = rename_nodes(graph_def, lambda s:"/".join(s.split('_',1)))
#show_graph(tmp_def)
# Prepare training: load data, optionally validate compatibility with a saved
# model when resuming (args.init_from), then persist config + vocab and build
# a fresh Model.
# NOTE(review): several assertion messages contain typos ("a a path",
# "disagreee") — left untouched here since they are runtime strings.
args = Args()
data_loader = TextLoader(args.data_dir, args.batch_size, args.seq_length)
args.vocab_size = data_loader.vocab_size
# check compatibility if training is continued from previously saved model
if args.init_from is not None:
print ("need to load file from", args.init_from)
# check if all necessary files exist
assert os.path.isdir(args.init_from)," %s must be a a path" % args.init_from
assert os.path.isfile(os.path.join(args.init_from,"config.pkl")),"config.pkl file does not exist in path %s"%args.init_from
assert os.path.isfile(os.path.join(args.init_from,"chars_vocab.pkl")),"chars_vocab.pkl.pkl file does not exist in path %s" % args.init_from
ckpt = tf.train.get_checkpoint_state(args.init_from)
assert ckpt,"No checkpoint found"
assert ckpt.model_checkpoint_path,"No model path found in checkpoint"
# open old config and check if models are compatible
with open(os.path.join(args.init_from, 'config.pkl')) as f:
saved_model_args = cPickle.load(f)
print (saved_model_args)
need_be_same=["model","rnn_size","seq_length"]
for checkme in need_be_same:
assert vars(saved_model_args)[checkme]==vars(args)[checkme],"Command line argument and saved model disagree on '%s' "%checkme
# open saved vocab/dict and check if vocabs/dicts are compatible
with open(os.path.join(args.init_from, 'chars_vocab.pkl')) as f:
saved_chars, saved_vocab = cPickle.load(f)
assert saved_chars==data_loader.chars, "Data and loaded model disagreee on character set!"
assert saved_vocab==data_loader.vocab, "Data and loaded model disagreee on dictionary mappings!"
print ("config loaded")
with open(os.path.join(args.save_dir, 'config.pkl'), 'wb') as f:
cPickle.dump(args, f)
with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'wb') as f:
cPickle.dump((data_loader.chars, data_loader.vocab), f)
print (args.print_every)
tf.reset_default_graph()
model = Model(args)
print ("model created")
# Main training loop: exponentially decay the learning rate per epoch, run the
# train op per batch, log the loss every print_every steps, checkpoint every
# save_every steps (and on the last batch), then plot the loss curve.
cost_optimisation = []
with tf.Session() as sess:
tf.initialize_all_variables().run()
print ("variable initialized")
saver = tf.train.Saver(tf.all_variables())
# restore model
if args.init_from is not None:
saver.restore(sess, ckpt.model_checkpoint_path)
print ("model restored")
for e in range(args.num_epochs):
sess.run(tf.assign(model.lr, args.learning_rate * (args.decay_rate ** e)))
data_loader.reset_batch_pointer()
state = model.initial_state.eval()
for b in range(data_loader.num_batches):
start = time.time()
# Get learning data
x, y = data_loader.next_batch()
# Create the structure for the learning data
feed = {model.input_data: x, model.target_data: y, model.initial_state: state}
# Run a session using train_op
[train_loss], state, _ = sess.run([model.cost, model.final_state, model.train_op], feed)
end = time.time()
if (e * data_loader.num_batches + b) % args.print_every == 0:
cost_optimisation.append(train_loss)
print("{}/{} (epoch {}), train_loss = {:.6f}, time/batch = {:.3f}" \
.format(e * data_loader.num_batches + b,
args.num_epochs * data_loader.num_batches,
e, train_loss, end - start))
if (e * data_loader.num_batches + b) % args.save_every == 0\
or (e==args.num_epochs-1 and b == data_loader.num_batches-1): # save for the last result
checkpoint_path = os.path.join(args.save_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step = e * data_loader.num_batches + b)
print("model saved to {}".format(checkpoint_path))
plt.figure(figsize=(12,5))
plt.plot(range(len(cost_optimisation)), cost_optimisation, label='cost')
plt.legend()
plt.show()
# Rebuild the model in sampling mode from the saved config/vocab, inspect it,
# then restore the latest checkpoint and sample text.
tf.reset_default_graph()
model_fn = 'model.pb'
with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
saved_args = cPickle.load(f)
with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
chars, vocab = cPickle.load(f)
model = Model(saved_args, True) # True to generate the model in sampling mode
with tf.Session() as sess:
tf.initialize_all_variables().run()
saver = tf.train.Saver(tf.all_variables())
ckpt = tf.train.get_checkpoint_state(args.save_dir)
print (ckpt)
model.inspect(draw=True)
with tf.Session() as sess:
tf.initialize_all_variables().run()
saver = tf.train.Saver(tf.all_variables())
ckpt = tf.train.get_checkpoint_state(args.save_dir)
print (ckpt)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print(model.sample(sess, chars, vocab, args.n, args.prime))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import needed for Jupyter
Step2: Imports needed for utilities
Step4: Args, to define all parameters
Step5: Load the data
Step6: Let's see how preprocessing works
Step7: Then we have
Step8: It can be used to calculate an ID from vocab
Step9: This is equivalent of the following code by Karpathy
Step10: Now we have to make a tensor out of the data.
Step11: Then we create a numpy array out of it!
Step12: Let's see how batching works
Step13: The Model
Step14: Inspect the model variables
Step19: Visualize the graph
Step20: Training
Step21: Instantiate the model and train it.
Step22: Check Learning
Step23: Sampling
|
7,168
|
<ASSISTANT_TASK:>
Python Code:
# Build a single-neuron logistic-regression model in Keras (old 1.x API:
# output_dim/input_dim) and load the iris-setosa training features.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Keras imports
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
# Build the model with keras
model = Sequential()
model.add( Dense( output_dim=1, input_dim=2 ) )
model.add( Activation( 'sigmoid' ) )
# Print the summary
model.summary()
# Load data
df = pd.read_csv('./data/setosa/train.csv')
X = df[['petal length (cm)', 'petal width (cm)']].values
y = df['setosa'].values
# Depends on notebook globals: `model` (trained Keras model) and `df` (data).
def plot_keras_model():
"Plot the results of the model, along with the data points"
# Calculate the probability on a mesh
petal_width_mesh, petal_length_mesh = \
np.meshgrid( np.linspace(0,3,100), np.linspace(0,8,100) )
petal_width_mesh = petal_width_mesh.flatten()
petal_length_mesh = petal_length_mesh.flatten()
p = model.predict( np.stack( (petal_length_mesh, petal_width_mesh), axis=1 ) )
p = p.reshape((100,100))
# Plot the probability on the mesh
plt.clf()
plt.imshow( p.T, extent=[0,8,0,3], origin='lower',
vmin=0, vmax=1, cmap='RdBu', aspect='auto', alpha=0.7 )
# Plot the data points
plt.scatter( df['petal length (cm)'], df['petal width (cm)'], c=df['setosa'], cmap='RdBu')
plt.xlabel('petal length (cm)')
plt.ylabel('petal width (cm)')
cb = plt.colorbar()
cb.set_label('setosa')
# Visualize the untrained model, train with SGD + binary cross-entropy
# (old Keras 1.x kwargs: lr, nb_epoch), re-visualize, then predict on test data.
plot_keras_model()
# Prepare the model for training
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.1), metrics=['accuracy'])
# Train the network
model.fit( X, y, batch_size=16, nb_epoch=20, verbose=1 )
plot_keras_model()
df_test = pd.read_csv('./data/setosa/test.csv')
df_test.head(10)
model.predict( np.array([[4.2, 1.5]]) )
df_test['probability_setosa_predicted'] = model.predict( df_test[['petal length (cm)', 'petal width (cm)']].values )
df_test
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Defining the model with keras
Step2: Training the network
Step3: and define a function to look at the predictions of the model (which for the moment is untrained).
Step4: Keras will then automatically adjust the weights by trying to minimize a given loss function.
Step5: What just happened?
|
7,169
|
<ASSISTANT_TASK:>
Python Code:
# Install required package (Katib SDK).
!pip install kubeflow-katib==0.13.0
from kubeflow.katib import KatibClient
from kubernetes.client import V1ObjectMeta
from kubeflow.katib import V1beta1Experiment
from kubeflow.katib import V1beta1AlgorithmSpec
from kubeflow.katib import V1beta1AlgorithmSetting
from kubeflow.katib import V1beta1ObjectiveSpec
from kubeflow.katib import V1beta1MetricsCollectorSpec
from kubeflow.katib import V1beta1CollectorSpec
from kubeflow.katib import V1beta1SourceSpec
from kubeflow.katib import V1beta1FilterSpec
from kubeflow.katib import V1beta1FeasibleSpace
from kubeflow.katib import V1beta1ExperimentSpec
from kubeflow.katib import V1beta1NasConfig
from kubeflow.katib import V1beta1GraphConfig
from kubeflow.katib import V1beta1Operation
from kubeflow.katib import V1beta1ParameterSpec
from kubeflow.katib import V1beta1TrialTemplate
from kubeflow.katib import V1beta1TrialParameterSpec
# Katib DARTS experiment building blocks: metadata, search algorithm settings,
# objective, and the stdout metrics collector (regex pulls the Genotype line).
# Experiment name and namespace.
namespace = "kubeflow-user-example-com"
experiment_name = "darts-example"
metadata = V1ObjectMeta(
name=experiment_name,
namespace=namespace
)
# Algorithm specification.
algorithm_spec=V1beta1AlgorithmSpec(
algorithm_name="darts",
algorithm_settings=[
V1beta1AlgorithmSetting(
name="num_epochs",
value="2"
),
V1beta1AlgorithmSetting(
name="stem_multiplier",
value="1"
),
V1beta1AlgorithmSetting(
name="init_channels",
value="4"
),
V1beta1AlgorithmSetting(
name="num_nodes",
value="3"
),
]
)
# Objective specification. For DARTS Goal is omitted.
objective_spec=V1beta1ObjectiveSpec(
type="maximize",
objective_metric_name="Best-Genotype",
)
# Metrics collector specification.
# We should specify metrics format to get Genotype from training container.
metrics_collector_spec=V1beta1MetricsCollectorSpec(
collector=V1beta1CollectorSpec(
kind="StdOut"
),
source=V1beta1SourceSpec(
filter=V1beta1FilterSpec(
metrics_format=[
"([\\w-]+)=(Genotype.*)"
]
)
)
)
# Configuration for the Neural Network (NN).
# This NN contains 2 layers and 5 operations with different parameters.
def _filter_size_op(operation_type, filter_sizes):
    """Build a V1beta1Operation with a categorical 'filter_size' parameter.

    Factors out the four structurally-identical operation literals the
    original spelled out by hand.
    """
    return V1beta1Operation(
        operation_type=operation_type,
        parameters=[
            V1beta1ParameterSpec(
                name="filter_size",
                parameter_type="categorical",
                feasible_space=V1beta1FeasibleSpace(list=filter_sizes),
            )
        ],
    )

nas_config = V1beta1NasConfig(
    graph_config=V1beta1GraphConfig(
        num_layers=2
    ),
    operations=[
        _filter_size_op("separable_convolution", ["3"]),
        _filter_size_op("dilated_convolution", ["3", "5"]),
        _filter_size_op("avg_pooling", ["3"]),
        _filter_size_op("max_pooling", ["3"]),
        # skip_connection has no tunable parameters.
        V1beta1Operation(
            operation_type="skip_connection",
        ),
    ]
)
# Trial worker spec (a Kubernetes Job), the Trial template binding the three
# trial parameters, the Experiment object, and the KatibClient lifecycle calls
# (create / get / status / best-genotype / delete).
# JSON template specification for the Trial's Worker Kubernetes Job.
trial_spec={
"apiVersion": "batch/v1",
"kind": "Job",
"spec": {
"template": {
"metadata": {
"annotations": {
"sidecar.istio.io/inject": "false"
}
},
"spec": {
"containers": [
{
"name": "training-container",
"image": "docker.io/kubeflowkatib/darts-cnn-cifar10:v0.13.0",
"command": [
'python3',
'run_trial.py',
'--algorithm-settings="${trialParameters.algorithmSettings}"',
'--search-space="${trialParameters.searchSpace}"',
'--num-layers="${trialParameters.numberLayers}"'
],
# Training container requires 1 GPU.
"resources": {
"limits": {
"nvidia.com/gpu": 1
}
}
}
],
"restartPolicy": "Never"
}
}
}
}
# Template with Trial parameters and Trial spec.
# Set retain to True to save trial resources after completion.
trial_template=V1beta1TrialTemplate(
retain=True,
primary_container_name="training-container",
trial_parameters=[
V1beta1TrialParameterSpec(
name="algorithmSettings",
description=" Algorithm settings of DARTS Experiment",
reference="algorithm-settings"
),
V1beta1TrialParameterSpec(
name="searchSpace",
description="Search Space of DARTS Experiment",
reference="search-space"
),
V1beta1TrialParameterSpec(
name="numberLayers",
description="Number of Neural Network layers",
reference="num-layers"
),
],
trial_spec=trial_spec
)
# Experiment object.
experiment = V1beta1Experiment(
api_version="kubeflow.org/v1beta1",
kind="Experiment",
metadata=metadata,
spec=V1beta1ExperimentSpec(
max_trial_count=1,
parallel_trial_count=1,
max_failed_trial_count=1,
algorithm=algorithm_spec,
objective=objective_spec,
metrics_collector_spec=metrics_collector_spec,
nas_config=nas_config,
trial_template=trial_template,
)
)
# Print the Trial template container info.
print(experiment.spec.trial_template.trial_spec["spec"]["template"]["spec"]["containers"][0])
# Create client.
kclient = KatibClient()
# Create your Experiment.
kclient.create_experiment(experiment,namespace=namespace)
exp = kclient.get_experiment(name=experiment_name, namespace=namespace)
print(exp)
print("-----------------\n")
# Get the latest status.
print(exp["status"]["conditions"][-1])
kclient.get_experiment_status(name=experiment_name, namespace=namespace)
kclient.is_experiment_succeeded(name=experiment_name, namespace=namespace)
opt_trial = kclient.get_optimal_hyperparameters(name=experiment_name, namespace=namespace)
best_genotype = opt_trial["currentOptimalTrial"]["observation"]["metrics"][0]["latest"]
print(best_genotype)
kclient.delete_experiment(name=experiment_name, namespace=namespace)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import required packages
Step2: Define your Experiment
Step3: You can print the Experiment's info to verify it before submission.
Step4: Create your Experiment
Step5: Get your Experiment
Step6: Get the current Experiment status
Step7: You can check if your Experiment is succeeded.
Step8: Get the best Genotype
Step9: Delete your Experiments
|
7,170
|
<ASSISTANT_TASK:>
Python Code:
# Fetch GRACE Mascon equivalent-water-thickness data for one Nevada point via
# skdaccess and plot EWD scaled by the wrapper's scale factor.
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi']=150
# Gravity Recovery and Climate Experiment (GRACE) Data
# Source: http://grace.jpl.nasa.gov/
# Current surface mass change data, measuring equivalent water thickness in cm, versus time
# This data fetcher uses results from the Mascon solutions
from skdaccess.geo.grace.mascon.cache import DataFetcher as GR_DF
from skdaccess.framework.param_class import *
geo_point = AutoList([(38, -117)]) # location in Nevada
grace_fetcher = GR_DF([geo_point],start_date='2010-01-01',end_date='2014-01-01')
grace_data_wrapper = grace_fetcher.output() # Get a data wrapper
grace_label, grace_data = next(grace_data_wrapper.getIterator())# Get GRACE data
grace_data.head()
scale_factor = grace_data_wrapper.info(grace_label)['scale_factor']
plt.plot(grace_data['EWD']*scale_factor);
plt.xticks(rotation=35);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get scale factor
Step2: Plot EWD $\times$ scale factor
|
7,171
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function # Python 2/3 compatibility
import numpy as np
import pandas as pd
from IPython.display import Image
# Tutorial placeholder cells; the Image call displays scikit-learn's
# estimator-selection flowchart inline.
## Your Turn
## Your Turn
## Choosing an Estimator
# http://scikit-learn.org/stable/tutorial/machine_learning_map/index.html
Image("http://scikit-learn.org/stable/_static/ml_map.png")
## Your Turn
## Your Turn
## Your Turn
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Data
Step2: Vectorize
Step3: Model
Step4: Model Tuning
Step5: Feeling Good? - Let's Update Kaggle Submission
Steps
|
7,172
|
<ASSISTANT_TASK:>
Python Code:
# Tensor2Tensor ASR setup: build the librispeech problem + transformer model in
# PREDICT mode, define waveform->feature encoding and id->text decoding, and
# pull the pretrained checkpoint from GCS.
problem_name = "librispeech_clean"
asr_problem = problems.problem(problem_name)
encoders = asr_problem.feature_encoders(None)
model_name = "transformer"
hparams_set = "transformer_librispeech_tpu"
hparams = trainer_lib.create_hparams(hparams_set,data_dir=data_dir, problem_name=problem_name)
asr_model = registry.model(model_name)(hparams, Modes.PREDICT)
# Encode a wav file path into the batched feature dict the model expects.
def encode(x):
waveforms = encoders["waveforms"].encode(x)
encoded_dict = asr_problem.preprocess_example({"waveforms":waveforms, "targets":[]}, Modes.PREDICT, hparams)
return {"inputs" : tf.expand_dims(encoded_dict["inputs"], 0), "targets" : tf.expand_dims(encoded_dict["targets"], 0)}
# Decode model output ids to text, truncating at the first EOS id (1).
def decode(integers):
integers = list(np.squeeze(integers))
if 1 in integers:
integers = integers[:integers.index(1)]
return encoders["targets"].decode(np.squeeze(integers))
# Copy the pretrained checkpoint locally
ckpt_name = "transformer_asr_180214"
gs_ckpt = os.path.join(gs_ckpt_dir, ckpt_name)
print(gs_ckpt)
!gsutil cp -R {gs_ckpt} {checkpoint_dir}
ckpt_path = tf.train.latest_checkpoint(os.path.join(checkpoint_dir, ckpt_name))
ckpt_path
# transcribe: run beam-search inference under the restored checkpoint.
# play_and_transcribe: additionally render an audio player for the clip.
# Restore and transcribe!
def transcribe(inputs):
encoded_inputs = encode(inputs)
with tfe.restore_variables_on_create(ckpt_path):
model_output = asr_model.infer(encoded_inputs, beam_size=2, alpha=0.6, decode_length=1)["outputs"]
return decode(model_output)
def play_and_transcribe(inputs):
waveforms = encoders["waveforms"].encode(inputs)
IPython.display.display(IPython.display.Audio(data=waveforms, rate=16000))
return transcribe(inputs)
# Accept user-uploaded audio files in Colab, persist them to tmp_dir, then
# play and transcribe each one (Python 2: cStringIO).
uploaded = google.colab.files.upload()
prerecorded_messages = []
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
mem_file = cStringIO.StringIO(uploaded[fn])
save_filename = os.path.join(tmp_dir, fn)
with open(save_filename, 'w') as fd:
mem_file.seek(0)
shutil.copyfileobj(mem_file, fd)
prerecorded_messages.append(save_filename)
for inputs in prerecorded_messages:
outputs = play_and_transcribe(inputs)
print("Inputs: %s" % inputs)
print("Outputs: %s" % outputs)
# Records webm file and converts it to a 16 kHz wav.
def RecordNewAudioSample(filename=None, webm_filename=None):
    """Record audio via the Colab frontend and save it as a 16 kHz wav.

    Args:
      filename: string, path for storing wav file.
      webm_filename: string, path for storing the intermediate webm file.

    Returns:
      string: path where the wav file was saved (== filename if specified).

    BUG FIX: the docstring's triple quotes were lost in this extract,
    leaving bare 'Args:'/'Returns:' lines (a syntax error); restored here
    along with conventional indentation. Logic unchanged (Python 2:
    cStringIO, base64.decodestring).
    """
    # Create default filenames in tmp_dir if not specified.
    if not filename:
        filename = os.path.join(tmp_dir, "recording.wav")
    if not webm_filename:
        webm_filename = os.path.join(tmp_dir, "recording.webm")
    # Record webm file from colab.
    audio = google.colab._message.blocking_request(
        'user_media', {"audio": True, "video": False, "duration": -1},
        timeout_sec=600)
    # Convert the recording into an in-memory file.
    music_mem_file = cStringIO.StringIO(
        base64.decodestring(audio[audio.index(',')+1:]))
    # Store webm recording in webm_filename. Storing is necessary for conversion.
    with open(webm_filename, 'w') as fd:
        music_mem_file.seek(0)
        shutil.copyfileobj(music_mem_file, fd)
    # Open stored file and save it as wav with sample_rate=16000.
    pydub.AudioSegment.from_file(webm_filename, codec="opus"
        ).set_frame_rate(16000).export(out_f=filename, format="wav")
    return filename
# Record a fresh sample and transcribe it (Python 2 print statements).
# Record the sample
my_sample_filename = RecordNewAudioSample()
print my_sample_filename
print play_and_transcribe(my_sample_filename)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define path to checkpoint
Step2: Define transcribe function
Step3: Decoding prerecorded examples
Step5: Recording your own examples
|
7,173
|
<ASSISTANT_TASK:>
Python Code:
# Notebook setup for the Cepheid period-luminosity example; !head previews
# the raw data file's columns.
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (15.0, 8.0)
# First, we need to know what's in the data file.
!head R11ceph.dat
class Cepheids(object):
    """Riess et al. (2011) Cepheid dataset with per-host selection and plotting.

    Columns used from the loadtxt array: [1] host ID, [2] magnitude,
    [3] magnitude error, [4] period in days.
    """

    def __init__(self, filename):
        # Read in the data and store it in this master array:
        self.data = np.loadtxt(filename)
        self.hosts = self.data[:, 1].astype('int').astype('str')
        # We'll need the plotting setup to be the same each time we make a plot:
        colornames = ['red', 'orange', 'yellow', 'green', 'cyan', 'blue', 'violet', 'magenta', 'gray']
        self.colors = dict(zip(self.list_hosts(), colornames))
        self.xlimits = np.array([0.3, 2.3])
        self.ylimits = np.array([30.0, 17.0])
        return

    def list_hosts(self):
        # The list of (9) unique galaxy host names:
        return np.unique(self.hosts)

    def select(self, ID):
        """Pull one galaxy's Cepheids out of the master array into m/merr/logP.

        BUG FIX: the original referenced the bare name ``data`` (a NameError
        at runtime); the array lives on the instance as ``self.data``.
        """
        index = (self.hosts == str(ID))
        self.m = self.data[index, 2]
        self.merr = self.data[index, 3]
        self.logP = np.log10(self.data[index, 4])
        return

    def plot(self, X):
        # Plot all the points in the dataset for host galaxy X.
        ID = str(X)
        self.select(ID)
        plt.rc('xtick', labelsize=16)
        plt.rc('ytick', labelsize=16)
        plt.errorbar(self.logP, self.m, yerr=self.merr, fmt='.', ms=7, lw=1, color=self.colors[ID], label='NGC'+ID)
        plt.xlabel('$\\log_{10} P / {\\rm days}$', fontsize=20)
        plt.ylabel('${\\rm magnitude (AB)}$', fontsize=20)
        plt.xlim(self.xlimits)
        plt.ylim(self.ylimits)
        plt.title('Cepheid Period-Luminosity (Riess et al 2011)', fontsize=20)
        return

    def overlay_straight_line_with(self, m=0.0, c=24.0):
        # Overlay a straight line with gradient m and intercept c.
        x = self.xlimits
        y = m*x + c
        plt.plot(x, y, 'k-', alpha=0.5, lw=2)
        plt.xlim(self.xlimits)
        plt.ylim(self.ylimits)
        return

    def add_legend(self):
        plt.legend(loc='upper left')
        return
# Plot two hosts' Cepheids and eyeball a straight line by hand
# (Python 2 print statement).
C = Cepheids('R11ceph.dat')
print C.colors
C.plot(4258)
C.plot(1309)
# for ID in C.list_hosts():
#     C.plot(ID)
C.overlay_straight_line_with(m=-3.0,c=26.0)
C.add_legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A Look at Each Host Galaxy's Cepheids
Step2: OK, now we are all set up! Let's plot some data.
|
7,174
|
<ASSISTANT_TASK:>
Python Code:
# Load the 2001 census area-unit table and the market-rent area table, and
# normalize their column names. `hp` is the project helper module.
# 2001 census area units
path = hp.DATA_DIR/'collected'/'Geographical Table.csv'
f = pd.read_csv(path, dtype={'SAU': str})
f = f.rename(columns={
'SAU': 'au2001',
'SAU.Desc': 'au_name',
'TA': 'territory',
'Region': 'region',
})
del f['Water']
f.head()
# rental area units
path = hp.DATA_DIR/'collected'/'Market Rent Areas.csv'
g = pd.read_csv(path, dtype={'SAU': str})
g = g.rename(columns={
'SAU': 'au2001',
'MARKET RENT DESCRIPTION': 'rental_area',
'TA': 'territory',
'AU NAME': 'au_name',
})
# Clean rental areas
def clean(x):
    """Reduce a 'TA - suburb' market-rent label to its suburb half.

    Keeps the territorial-authority half instead whenever the suburb half
    is a 'District' name (behavior identical to the original expression).
    """
    parts = x.split(' - ')
    if 'District' in parts[1]:
        return parts[0]
    return parts[1]
# Apply the label cleaner, attach rental areas to the census table, and
# persist the merged metadata CSV.
g['rental_area'] = g['rental_area'].map(clean)
f = f.merge(g[['au2001', 'rental_area']])
path = hp.get_path('au2001_csv')
f.to_csv(path, index=False)
f.head()
# Read the NZ area-unit shapefile (NZGD49 -> WGS84), drop water-only units,
# join on the metadata, and write GeoJSON.
# Read Shapefile
path = hp.DATA_DIR/'collected'/'NZ_AU01_region_simplified'/'NZ_AU01_region.shp'
au = gpd.read_file(str(path))
au.crs = hp.CRS_NZGD49
au = au.to_crs(hp.CRS_WGS84)
au = au.rename(columns={'AU01': 'au2001', 'AU_DESC': 'au_name'})
print(au.shape)
print(au.head())
au.head().plot()
# Remove water area units
pattern = r'ocean|strait|inlet|harbour'
cond = au['au_name'].str.contains(pattern, case=False)
au = au[~cond].copy()
print(au.shape)
au.head().plot()
# Merge geodata and metadata, drop null regions, and write to file
f = hp.get_data('au2001_csv')
g = au.merge(f[['au2001', 'territory', 'region', 'rental_area']])
g = g[g['region'].notnull()].copy()
path = hp.get_path('au2001')
with path.open('w') as tgt:
tgt.write(g.to_json())
g.head()
# Dissolve area units by rental area and write the result as GeoJSON.
# BUG FIX: the original called the bare name `get_data` (a NameError) —
# the helper lives on the `hp` module, as in every other call in this file.
au = hp.get_data('au2001')
ra = au[['rental_area', 'region', 'territory', 'geometry']].dissolve(by='rental_area').reset_index()
path = hp.get_path('rental_areas')
with path.open('w') as tgt:
    tgt.write(ra.to_json())
ra.head()
# Spatial-join property titles onto rental areas, then pick one representative
# point per rental area from the union of its titles and write GeoJSON.
ra = hp.get_data('rental_areas')
t = hp.get_data('property_titles')
t.head()
# Spatial-join titles to rental areas
%time f = gpd.sjoin(t[['geometry', 'fid']], ra, op='intersects')
f.head()
# Choose representative points for rental areas
# pt: collapse one rental-area group to a representative point + labels
# (uses shapely.ops as `so`).
def pt(group):
d = {}
d['geometry'] = so.unary_union(group['geometry']).representative_point()
d['territory'] = group['territory'].iat[0]
d['region'] = group['region'].iat[0]
return pd.Series(d)
g = gpd.GeoDataFrame(f.groupby('rental_area').apply(pt).reset_index())
path = hp.get_path('rental_points')
with path.open('w') as tgt:
tgt.write(g.to_json())
g.head()
# Write per-region GeoJSON slices of the rental areas and rental points.
ra = hp.get_data('rental_areas')
rap = hp.get_data('rental_points')
for region in hp.REGIONS:
region_root = hp.DATA_DIR/region
if not region_root.exists():
region_root.mkdir()
region_c = region.capitalize()
# Rental areas slice
f = ra[ra['region'] == region_c].copy()
path = hp.get_path('rental_areas', region)
with path.open('w') as tgt:
tgt.write(f.to_json())
# Rental area points slice
f = rap[rap['region'] == region_c].copy()
path = hp.get_path('rental_points', region)
with path.open('w') as tgt:
tgt.write(f.to_json())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Process area units and rental areas into GeoJSON
Step2: Create geodata for rental areas
Step3: Choose representative points for rental areas using approximate centroids of property titles
Step4: Prepare regional slices of data
|
7,175
|
<ASSISTANT_TASK:>
Python Code:
# DyNet LSTM tutorial: build a 2-layer LSTM, feed one input, read the output
# (Python 2 print statements).
# we assume that we have the dynet module in your path.
# OUTDATED: we also assume that LD_LIBRARY_PATH includes a pointer to where libcnn_shared.so is.
from dynet import *
model = Model()
NUM_LAYERS=2
INPUT_DIM=50
HIDDEN_DIM=10
builder = LSTMBuilder(NUM_LAYERS, INPUT_DIM, HIDDEN_DIM, model)
# or:
# builder = SimpleRNNBuilder(NUM_LAYERS, INPUT_DIM, HIDDEN_DIM, model)
s0 = builder.initial_state()
x1 = vecInput(INPUT_DIM)
s1=s0.add_input(x1)
y1 = s1.output()
# here, we add x1 to the RNN, and the output we get from the top is y (a HIDEN_DIM-dim vector)
y1.npvalue().shape
s2=s1.add_input(x1) # we can add another input
y2=s2.output()
print s2.h()
# Compare SimpleRNN vs LSTM state shapes (h vs s), branch a state into several
# shared-parameter sequences, then show add_inputs vs transduce.
# create a simple rnn builder
rnnbuilder=SimpleRNNBuilder(NUM_LAYERS, INPUT_DIM, HIDDEN_DIM, model)
# initialize a new graph, and a new sequence
rs0 = rnnbuilder.initial_state()
# add inputs
rs1 = rs0.add_input(x1)
ry1 = rs1.output()
print "all layers:", s1.h()
print s1.s()
rnn_h = rs1.h()
rnn_s = rs1.s()
print "RNN h:", rnn_h
print "RNN s:", rnn_s
lstm_h = s1.h()
lstm_s = s1.s()
print "LSTM h:", lstm_h
print "LSTM s:", lstm_s
s2=s1.add_input(x1)
s3=s2.add_input(x1)
s4=s3.add_input(x1)
# let's continue s3 with a new input.
s5=s3.add_input(x1)
# we now have two different sequences:
# s0,s1,s2,s3,s4
# s0,s1,s2,s3,s5
# the two sequences share parameters.
assert(s5.prev() == s3)
assert(s4.prev() == s3)
s6=s3.prev().add_input(x1)
# we now have an additional sequence:
# s0,s1,s2,s6
s6.h()
s6.s()
state = rnnbuilder.initial_state()
xs = [x1,x1,x1]
states = state.add_inputs(xs)
outputs = [s.output() for s in states]
hs = [s.h() for s in states]
print outputs, hs
state = rnnbuilder.initial_state()
xs = [x1,x1,x1]
outputs = state.transduce(xs)
print outputs
# Character-level LM setup: vocabulary over lowercase letters + space + <EOS>,
# one SimpleRNN and one LSTM builder sharing the output projection params.
import random
from collections import defaultdict
from itertools import count
import sys
LAYERS = 2
INPUT_DIM = 50
HIDDEN_DIM = 50
characters = list("abcdefghijklmnopqrstuvwxyz ")
characters.append("<EOS>")
int2char = list(characters)
char2int = {c:i for i,c in enumerate(characters)}
VOCAB_SIZE = len(characters)
model = Model()
srnn = SimpleRNNBuilder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
lstm = LSTMBuilder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
params = {}
params["lookup"] = model.add_lookup_parameters((VOCAB_SIZE, INPUT_DIM))
params["R"] = model.add_parameters((VOCAB_SIZE, HIDDEN_DIM))
params["bias"] = model.add_parameters((VOCAB_SIZE))
# Build the computation graph for one sentence and return its summed
# negative log-likelihood under the given RNN (teacher forcing, <EOS> padded).
# return compute loss of RNN for one sentence
def do_one_sentence(rnn, sentence):
# setup the sentence
renew_cg()
s0 = rnn.initial_state()
R = parameter(params["R"])
bias = parameter(params["bias"])
lookup = params["lookup"]
sentence = ["<EOS>"] + list(sentence) + ["<EOS>"]
sentence = [char2int[c] for c in sentence]
s = s0
loss = []
for char,next_char in zip(sentence,sentence[1:]):
s = s.add_input(lookup[char])
probs = softmax(R*s.output() + bias)
loss.append( -log(pick(probs,next_char)) )
loss = esum(loss)
return loss
# Sample a string from the model, one character at a time, until <EOS>.
# The nested sample() draws an index from a discrete distribution by
# inverse-CDF sampling.
# generate from model:
def generate(rnn):
def sample(probs):
rnd = random.random()
for i,p in enumerate(probs):
rnd -= p
if rnd <= 0: break
return i
# setup the sentence
renew_cg()
s0 = rnn.initial_state()
R = parameter(params["R"])
bias = parameter(params["bias"])
lookup = params["lookup"]
s = s0.add_input(lookup[char2int["<EOS>"]])
out=[]
while True:
probs = softmax(R*s.output() + bias)
probs = probs.vec_value()
next_char = sample(probs)
out.append(int2char[next_char])
if out[-1] == "<EOS>": break
s = s.add_input(lookup[next_char])
return "".join(out[:-1]) # strip the <EOS>
# SGD training over a single sentence for 200 steps, sampling every 5 steps;
# then run it for both the SimpleRNN and the LSTM (Python 2: xrange, print).
# train, and generate every 5 samples
def train(rnn, sentence):
trainer = SimpleSGDTrainer(model)
for i in xrange(200):
loss = do_one_sentence(rnn, sentence)
loss_value = loss.value()
loss.backward()
trainer.update()
if i % 5 == 0:
print loss_value,
print generate(rnn)
sentence = "a quick brown fox jumped over the lazy dog"
train(srnn, sentence)
sentence = "a quick brown fox jumped over the lazy dog"
train(lstm, sentence)
train(srnn, "these pretzels are making me thirsty")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: An LSTM/RNN overview
Step2: Note that when we create the builder, it adds the internal RNN parameters to the model.
Step3: If our LSTM/RNN was one layer deep, y2 would be equal to the hidden state. However, since it is 2 layers deep, y2 is only the hidden state (= output) of the last layer.
Step4: The same interface that we saw until now for the LSTM, holds also for the Simple RNN
Step5: To summarize, when calling .add_input(x) on an RNNState what happens is that the state creates a new RNN/LSTM column, passing it
Step6: As we can see, the LSTM has two extra state expressions (one for each hidden layer) before the outputs h.
Step7: Aside
Step8: This is convenient.
Step9: Character-level LSTM
Step10: Notice that
Step11: The model seem to learn the sentence quite well.
|
7,176
|
<ASSISTANT_TASK:>
Python Code:
# Notebook configuration: Splunk credentials, package installs, pandas/numpy
# display options, and Graphistry + Splunk client setup.
# NOTE(review): `GRAPHISTRY` is not defined in this extract — presumably a
# credentials dict analogous to SPLUNK; confirm in the full notebook.
#graphistry
# To specify Graphistry account & server, use:
# graphistry.register(api=3, username='...', password='...', protocol='https', server='hub.graphistry.com')
# For more options, see https://github.com/graphistry/pygraphistry#configure
#splunk
SPLUNK = {
'host': 'SPLUNK.MYSITE.COM',
'scheme': 'https',
'port': 8089,
'username': 'corelight_tutorial',
'password': 'MY_SPLUNK_PWD'
}
!pip install graphistry -q
!pip install splunk-sdk -q
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import sys
import numpy as np
import math
np.set_printoptions(threshold=sys.maxsize)
import re
import graphistry
graphistry.register(**GRAPHISTRY)
import splunklib
import splunklib.client as client
import splunklib.results as results
service = client.connect(**SPLUNK)
def safe_log(v):
    """Best-effort natural log of round(v) + 1.

    Returns 0 for NaN and for anything that cannot be coerced to float or
    logged (behavior identical to the original one-liner, including the
    deliberately broad bare except).
    """
    try:
        as_float = float(v)
        if np.isnan(as_float):
            return 0
        return math.log(round(as_float) + 1)
    except:
        return 0
# Convert bytes to log of numbers
# Running this twice is safe (idempotent)
# Returns a copy (no mutation of the original)
def log_of_bytes(df):
    """Return a copy of `df` with a log-scaled companion column for each *bytes* column.

    Idempotent: columns already named ``log(...)`` are skipped, so running this
    twice adds nothing new. The input frame is never mutated.
    """
    out = df.copy()
    byte_cols = [c for c in df.columns
                 if re.match(r'.*bytes.*', c) and not re.match(r'log\(.*', c)]
    for col in byte_cols:
        out['log(' + col + ')'] = df[col].apply(safe_log)
    return out
STEP = 50000;
def splunkToPandas(qry, overrides=None):
    """Run a blocking Splunk search and return all results as a pandas DataFrame.

    Results are fetched in pages of STEP rows and concatenated; every column is
    cast to str for uniform downstream handling.

    :param qry: Splunk search string.
    :param overrides: optional dict of job options merged over the defaults.
        (The previous mutable-default ``overrides={}`` was replaced by ``None``
        to avoid the shared-default-argument pitfall.)
    :return: DataFrame of all result rows (empty DataFrame when there are none).
    """
    kwargs_blockingsearch = {
        "count": 0,
        "earliest_time": "2010-01-24T07:20:38.000-05:00",
        "latest_time": "now",
        "search_mode": "normal",
        "exec_mode": "blocking",
        **(overrides or {})}
    job = service.jobs.create(qry, **kwargs_blockingsearch)
    print("Search results:\n")
    resultCount = job["resultCount"]
    offset = 0
    print('results', resultCount)
    out = None
    while offset < int(resultCount):
        print("fetching:", offset, '-', offset + STEP)
        kwargs_paginate = {**kwargs_blockingsearch,
                           "count": STEP,
                           "offset": offset}
        # Fetch one page of results and accumulate it.
        blocksearch_results = job.results(**kwargs_paginate)
        reader = results.ResultsReader(blocksearch_results)
        page = pd.DataFrame([x for x in reader])
        out = page if out is None else pd.concat([out, page], ignore_index=True)
        offset += STEP
    if out is None:
        # No results at all: previously this crashed on `out.columns`.
        return pd.DataFrame()
    for c in out.columns:
        out[c] = out[c].astype(str)
    return out
categories = {
'ip': ['id.orig_h', 'id.resp_h']
}
opts={
'CATEGORIES': categories
}
##Extend graphistry.plotter.Plotter to add chainable method "my+graph.color_points_by('some_column_name')..." (and "color_edges_by")
import graphistry.plotter
def color_col_by_categorical(df, type_col):
    """Map each distinct value of `type_col` to a small integer color index.

    Indices follow first-appearance order of the values in the column.
    """
    palette = {value: idx for idx, value in enumerate(df[type_col].unique())}
    return df[type_col].apply(lambda value: palette[value])
def color_col_by_continuous(df, type_col):
    """Map a numeric column onto a 10-step color ramp.

    Falls back to categorical coloring when the value range is (near-)degenerate,
    since a ramp over an empty range is meaningless.
    """
    lo = df[type_col].astype(float).min()
    hi = df[type_col].astype(float).max()
    if hi - lo < 0.000001:
        print('warning: too small values for color_col_by_continuous')
        return color_col_by_categorical(df, type_col)
    print('coloring for range', lo, hi)
    return df[type_col].apply(lambda v: 228010 - round(10 * (float(v) - lo)/(hi - lo) ))
## g * str * 'categorical' | 'continuous' -> g
def color_points_by(g, type_col, kind='categorical'):
    """Return a copy of plotter `g` with point colors derived from a node column.

    `kind` selects categorical (default) or continuous coloring.
    """
    colorer = color_col_by_continuous if kind != 'categorical' else color_col_by_categorical
    point_colors = colorer(g._nodes, type_col)
    recolored_nodes = g._nodes.assign(point_color=point_colors)
    return g.nodes(recolored_nodes).bind(point_color='point_color')
## g * str * 'categorical' | 'continuous' -> g
def color_edges_by(g, type_col, kind='categorical'):
    """Return a copy of plotter `g` with edge colors derived from an edge column.

    `kind` selects categorical (default) or continuous coloring.
    """
    colorer = color_col_by_continuous if kind != 'categorical' else color_col_by_categorical
    edge_colors = colorer(g._edges, type_col)
    recolored_edges = g._edges.assign(edge_color=edge_colors)
    return g.edges(recolored_edges).bind(edge_color='edge_color')
graphistry.plotter.Plotter.color_points_by = color_points_by
graphistry.plotter.Plotter.color_edges_by = color_edges_by
## remove node/edges pointing to "*::nan" values
def safe_not_nan(prog, v):
    """True unless `v` matches the compiled nan-pattern `prog`.

    Non-string values (which ``re`` cannot match) count as "not nan".
    """
    try:
        return not prog.match(v)
    except TypeError:
        # v is not str/bytes; was a bare `except:` that also hid real errors.
        return True
def drop_nan_col(df, col, prog):
    """Drop rows of `df` whose value in `col` matches the nan-pattern `prog`."""
    keep_mask = df[col].apply(lambda v: safe_not_nan(prog, v))
    return df[keep_mask == True]
def drop_nan(g, edges = ['src', 'dst'], nodes = ['nodeID']):
    """Strip hypergraph rows whose value in any column ends in '::nan'.

    NOTE(review): the `edges` and `nodes` parameters are never used in the
    body; every column of g._edges and g._nodes is filtered regardless.
    """
    nan_pattern = re.compile(".*::nan$")
    filtered_edges = g._edges
    for col in g._edges.columns:
        filtered_edges = drop_nan_col(filtered_edges, col, nan_pattern)
    filtered_nodes = g._nodes
    for col in g._nodes.columns:
        filtered_nodes = drop_nan_col(filtered_nodes, col, nan_pattern)
    return g.nodes(filtered_nodes).edges(filtered_edges)
graphistry.plotter.Plotter.drop_hyper_nans = drop_nan
df = splunkToPandas(
search index=corelight_tutorial
| dedup id.orig_h, id.resp_h, name
| fields - _*
| head 100
,
{'sample_ratio': 10}) # Optional, means "sample 1 in 10"
print('# rows', len(df))
df.sample(3)
hg = graphistry.hypergraph(
df,
["id.orig_h", "id.resp_h", "name", "uid"],
direct=True,
opts={
'CATEGORIES': {
'ip': ['id.orig_h', 'id.resp_h'] # combine repeats across columns into the same nodes
}
})
hg['graph'].plot()
#optional - add: OR (version=* AND version != TLSv12)
certs_a_df = splunkToPandas(
search index="corelight_tutorial" cert_chain_fuids{}=*
validation_status="certificate has expired"
OR validation_status="self signed certificate"
OR validation_status ="self signed certificate in certificate chain"
| fields *
| fields - _*
| head 50000
,
{'sample_ratio': 1})
print('# rows', len(certs_a_df))
certs_a_df.sample(10)
hg = graphistry.hypergraph(
certs_a_df,
["id.orig_h", "id.resp_h", "uid", "ja3", "issuer", "subject"], ### "uid", "protocol", ....
direct=True,
opts={
**opts,
'EDGES': {
'id.orig_h': ["id.resp_h", "ja3", "subject"],
'ja3': ['id.resp_h'],
"subject": ['id.resp_h'],
'issuer': ['id.resp_h']
}})
hg['graph'].bind(edge_title='category').drop_hyper_nans().color_points_by('category').color_edges_by('version').plot()
ntlm_a_df = splunkToPandas(
search index="corelight_tutorial"
[ search index="corelight_tutorial" ntlm | dedup uid | fields + uid ]
| fields *
| head 1000
,
{'sample_ratio': 1})
print('# rows', len(ntlm_a_df))
ntlm_a_df.sample(3)
hg = graphistry.hypergraph(
ntlm_a_df,
["id.orig_h", "name", "id.resp_h", "path", "hostname", "domainname", "username"], ### "uid", "protocol", ....
direct=True,
opts={
**opts,
'EDGES': {
"username": ['id.orig_h'],
"id.orig_h": ['name', 'id.resp_h', "hostname", "domainname"],
'path': ['name'],
'hostname': ['id.resp_h'],
'domainname': ['id.resp_h'],
"name": ['id.resp_h'],
"id.resp_h": ['username']
}})
hg['graph'].bind(edge_title='name').drop_hyper_nans().color_points_by('category').color_edges_by('username').plot()
dns_a_df = splunkToPandas(
search index="corelight_tutorial" sourcetype="conn"
| eval total_bytes = orig_ip_bytes + resp_ip_bytes
| eval log_total_bytes = log(orig_ip_bytes + resp_ip_bytes)
| stats
count(_time) as count,
earliest(_time), latest(_time),
values(answers{}) as answers,
values(conn_state),
values(history)
values(issuer),
values(ja3),
values(last_alert),
values(qtype_name),
values(subject),
max(*bytes), avg(*bytes), sum(*bytes),
by id.orig_h, id.resp_h
| eval duration_ms = last_time_ms - first_time_ms
| head 50000
,
{'sample_ratio': 1})
print('# rows', len(dns_a_df))
dns_a_df.sample(3)
hg = graphistry.hypergraph(
dns_a_df,
["id.orig_h", "id.resp_h"], ### "uid", "protocol", ....
direct=True,
opts=opts)
hg['graph'].color_points_by('category').color_edges_by('max(log_total_bytes)', 'continuous').bind(edge_title='max(total_bytes)').plot()
dns_b_df = splunkToPandas(
search index="corelight_tutorial" sourcetype="conn"
| eval total_bytes = orig_ip_bytes + resp_ip_bytes
| eval log_total_bytes = log(orig_ip_bytes + resp_ip_bytes)
| eval query_length = length(query)
| eval long_answers=mvfilter(length('answers{}') > 45)
| eval long_answers_length = max(length(long_answers))
| where query_length > 25 OR long_answers_length > 45
| stats
count(_time) as count,
earliest(_time), latest(_time),
values(answers{}) as answers,
max(long_answers_length) as max_long_answers_length,
values(conn_state),
values(history)
values(issuer),
values(ja3),
values(last_alert),
values(subject),
max(*bytes), avg(*bytes), sum(*bytes),
values(qtype_name),
first(uid),
max(*bytes), avg(*bytes), sum(*bytes),
by id.orig_h, id.resp_h, query, query_length
| eval duration_ms = last_time_ms - first_time_ms
| eval query=substr(query,1,100)
| eval max_query_or_answer_length = max(query_length, max_long_answers_length)
| sort max_query_or_answer_length desc
| head 50000
,
{'sample_ratio': 1})
print('# rows', len(dns_b_df))
dns_b_df.sample(3)
hg = graphistry.hypergraph(
dns_b_df,
["id.orig_h", "id.resp_h", "query", "answers"], ### "uid", "protocol", ....
direct=True,
opts={
**opts,
'EDGES': {
'id.orig_h': ['query'],
'query': ['id.resp_h'],
'id.resp_h': ['answers'],
'answers': ['id.orig_h']
}})
g = hg['graph'].bind(edge_title='query').drop_hyper_nans().color_points_by('category').color_edges_by('max_query_or_answer_length', 'continuous')
g.plot()
dns_b2_df = splunkToPandas(
search index="corelight_tutorial"
C3ApkJ3TwWW64DtnWb OR CaAbvy2ureWe5sifRf OR 10.0.2.30 OR 10.0.2.20 OR 34.215.241.13 OR 192.168.1.128
| eval time=ts
| rename answers{} as answers
| fields *
| fields - _*
| head 50000
,
{'sample_ratio': 1})
print('# rows', len(dns_b2_df))
dns_b2_df.sample(3)
hg = graphistry.hypergraph(
dns_b2_df,
["id.orig_h", "id.resp_h"], ### "uid", "protocol", ....
direct=True,
opts=opts)
hg['graph'].bind(edge_title='sourcetype').drop_hyper_nans().color_points_by('category').color_edges_by('sourcetype').plot()
dns_b3_df = splunkToPandas(
search index="corelight_tutorial" sourcetype="conn"
C3ApkJ3TwWW64DtnWb OR CaAbvy2ureWe5sifRf OR 10.0.2.30 OR 10.0.2.20 OR 34.215.241.13 OR 192.168.1.128
| eval total_bytes = orig_ip_bytes + resp_ip_bytes
| eval log_total_bytes = log(orig_ip_bytes + resp_ip_bytes)
| eval query_length = length(query)
| eval long_answers=mvfilter(length('answers{}') > 45)
| eval long_answers_length = max(length(long_answers))
| where query_length > 25 OR long_answers_length > 45
| stats
count(_time) as count,
earliest(_time), latest(_time),
values(answers{}) as answers,
max(long_answers_length) as max_long_answers_length,
values(conn_state),
values(history)
values(issuer),
values(ja3),
values(last_alert),
values(subject),
max(*bytes), avg(*bytes), sum(*bytes),
values(qtype_name),
first(uid),
max(*bytes), avg(*bytes), sum(*bytes),
by id.orig_h, id.resp_h, query, query_length
| eval duration_ms = last_time_ms - first_time_ms
| eval query=substr(query,1,100)
| eval max_query_or_answer_length = max(query_length, max_long_answers_length)
| sort max_query_or_answer_length desc
| head 50000
,
{'sample_ratio': 1})
print('# rows', len(dns_b3_df))
dns_b3_df.sample(3)
hg = graphistry.hypergraph(
dns_b3_df,
["id.orig_h", "id.resp_h", "query", "answers", "first(uid)"], ### "uid", "protocol", ....
direct=True,
opts={
**opts,
'EDGES': {
'id.orig_h': ['query'],
'query': ['id.resp_h'],
'id.resp_h': ['answers'],
'answers': ['id.orig_h']
}})
hg['graph'].bind(edge_title='query').drop_hyper_nans().color_points_by('category').color_edges_by('max_query_or_answer_length', 'continuous').plot()
mime_df = splunkToPandas(
search index=corelight_tutorial filename!=*.exe mime_type=application/x-dosexec
| head 200
,
{'sample_ratio': 1})
print('# rows', len(dns_b3_df))
dns_b3_df.sample(3)
## Old a
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imports
Step2: Helpers
Step3: Splunk
Step4: Bro/Zeek
Step5: Graphistry
Step7: Notebook intro
Step9: 1. Hunting through encrypted traffic
Step10: The graph
Step12: 2. Hunting Insider Threats with NTLM+SMB
Step13: The graph
Step15: 3. DNS Tunneling
Step16: Graph demo
Step18: 3.B. DNS Tunnel
Step19: The graph
Step21: Dig into interesting UIDs and IPs 1
Step23: Dig into interesting UIDs and IPs 2
Step25: 4. Mimetype Mismatch
Step26: The graph
|
7,177
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
fname = "../data/periods.csv"
df = pd.read_csv(fname)
df
df[df.name=="Permian"].start
df.loc[df.name=='Cretaceous', 'start'] = 145.0
df.loc[df.name=='Cretaceous', 'start']
df.to_csv("../data/pdout.csv")
import csv
with open(fname) as f:
reader = csv.reader(f)
data = [row for row in reader]
data
[d[2] for d in data if d[0]=="Permian"]
with open(fname) as f:
reader = csv.DictReader(f)
data = [row for row in reader]
data
[d['start'] for d in data if d['name']=="Permian"]
import requests
import io
df = pd.read_csv('https://raw.githubusercontent.com/agile-geoscience/xlines/master/data/periods.csv')
df.head()
import numpy as np
x = np.genfromtxt(fname, delimiter=',', skip_header=1, usecols=[2,3])
x
np.savetxt("../data/npout.csv", x, delimiter=",", header="start,end")
key = "PUT YOUR KEY HERE"
import json
url = "https://sheets.googleapis.com/v4/spreadsheets/{id}/values/{sheet}"
meta = {"id": "1YlnEGT8uHpRllk7rjAgFFl8V6B5-kl02DBie11PjG9Q",
"sheet": "Sheet1"
}
url = url.format(**meta)
params = {"key": key}
r = requests.get(url, params=params)
j = json.loads(r.text)['values']
df = pd.DataFrame(j[1:], columns=j[0])
df.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can get the start of the Permian like this
Step2: Let's fix the start of the Cretaceous
Step3: After you have changed or added to a DataFrame, pandas also makes it very easy to write a CSV file containing your data.
Step4: Using csv.reader
Step5: Note that we needed to know the positions of the items in the rows, which we could only get by inspection. We could skip that header row if we wanted to, but there's a better way
Step6: There is a corresponding DictWriter class for writing CSVs.
Step7: Bonus
Step8: We can write a CSV like so
Step9: Bonus
|
7,178
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
# plot the predicted values and actual values (for the test data)
def plot_result(test_df, pred_df, dt_col="timestamp", value_col="value", past_seq_len=1):
    """Overlay predicted vs. actual values for the test period on one axis.

    The first `past_seq_len` actual rows have no prediction (they form the
    look-back window), so they are trimmed from the actual series.
    """
    predicted = pred_df[value_col].values
    actual = test_df[value_col].values[past_seq_len:]
    fig, axs = plt.subplots(figsize=(12, 5))
    axs.plot(pred_df[dt_col], predicted, color='red', label='predicted values')
    axs.plot(test_df[dt_col][past_seq_len:], actual, color='blue', label='actual values')
    axs.set_title('the predicted values and actual values (for the test data)')
    plt.xlabel(dt_col)
    plt.xticks(rotation=45)
    plt.ylabel('number of taxi passengers')
    plt.legend(loc='upper left')
    plt.show()
# plot results of multi step forecasting
# plot at most five values for better view
# plot the predicted values and actual values (for the test data)
def plot_less_five_step_result(test_df, pred_df, dt_col="timestamp", value_col="value", past_seq_len=1):
    """Plot actual values plus at most the first five forecast horizons.

    Each horizon i (column "<value_col>_i") is shifted forward by i sampling
    intervals so it lines up with the timestamp it predicts.
    """
    fig, axs = plt.subplots(figsize=(12, 5))
    actual = test_df[value_col].values[past_seq_len:]
    axs.plot(test_df[dt_col][past_seq_len:], actual, color='blue', label='actual values')
    n_steps = min(pred_df.shape[1] - 1, 5)
    value_cols = ["{}_{}".format(value_col, i) for i in range(n_steps)]
    time_delta = pred_df[dt_col][1] - pred_df[dt_col][0]
    plot_color = ["g", "r", "c", "m", "y"]
    for i, col in enumerate(value_cols):
        axs.plot(pred_df[dt_col].values + time_delta * i,
                 pred_df[col].values,
                 color=plot_color[i], label='predicted values' + str(i))
    axs.set_title('the predicted values and actual values (for the test data)')
    plt.xlabel(dt_col)
    plt.xticks(rotation=45)
    plt.ylabel('number of taxi passengers')
    plt.legend(loc='upper left')
    plt.show()
# plot results of multi step forecasting
# plot result of multi step forecasting
# plot the predicted values and actual values (for the test data)
def plot_first_last_step_result(test_df, pred_df, dt_col="timestamp", value_col="value", past_seq_len=1):
    """Plot actual values together with only the first and last forecast horizons."""
    fig, axs = plt.subplots(figsize=(12, 5))
    actual = test_df[value_col].values[past_seq_len:]
    axs.plot(test_df[dt_col][past_seq_len:], actual, color='blue', label='actual values')
    value_cols = ["{}_{}".format(value_col, i) for i in range(pred_df.shape[1] - 1)]
    time_delta = pred_df[dt_col][1] - pred_df[dt_col][0]
    # The first horizon lines up with the prediction timestamps directly.
    axs.plot(pred_df[dt_col].values, pred_df[value_cols[0]].values,
             color="g", label='first predicted values')
    # The last horizon is shifted forward by (n_horizons - 1) sampling intervals.
    axs.plot(pred_df[dt_col].values + time_delta * (len(value_cols) - 1),
             pred_df[value_cols[-1]].values,
             color="r", label='last predicted values')
    axs.set_title('the predicted values and actual values (for the test data)')
    plt.xlabel(dt_col)
    plt.xticks(rotation=45)
    plt.ylabel('number of taxi passengers')
    plt.legend(loc='upper left')
    plt.show()
import os
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
%pylab inline
import matplotlib.dates as md
from matplotlib import pyplot as plt
# load nyc taxi data
try:
dataset_path = os.getenv("ANALYTICS_ZOO_HOME")+"/bin/data/NAB/nyc_taxi/nyc_taxi.csv"
raw_df = pd.read_csv(dataset_path)
except Exception as e:
print("nyc_taxi.csv doesn't exist")
print("you can run $ANALYTICS_ZOO_HOME/bin/data/NAB/nyc_taxi/get_nyc_taxi.sh to download nyc_taxi.csv")
raw_df.head(5)
df = pd.DataFrame(pd.to_datetime(raw_df.timestamp))
df["value"] = raw_df["value"]
df.head()
from zoo.chronos.autots.deprecated.preprocessing.utils import train_val_test_split
train_df, val_df, test_df = train_val_test_split(df, val_ratio=0.1, test_ratio=0.1)
train_df.describe()
train_df.head()
# shape of the dataframe
print("The shape of train_df is", train_df.shape)
print("The shape of val_df is", val_df.shape)
print("The shape of test_df is", test_df.shape)
# visualisation of anomaly throughout time in train_df
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
fig, ax = plt.subplots(figsize=(12, 5))
# pd.plotting.deregister_matplotlib_converters()
ax.plot(train_df['timestamp'], train_df['value'], color='blue', linewidth=0.6)
ax.set_title('NYC taxi passengers throughout time')
plt.xlabel('timestamp')
plt.xticks(rotation=45)
plt.ylabel('The Number of NYC taxi passengers')
plt.legend(loc='upper left')
plt.show()
# build time sequence predictor
from zoo.chronos.autots.deprecated.regression.time_sequence_predictor import TimeSequencePredictor
# you need to specify the name of datetime column and target column
# The default names are "timestamp" and "value" respectively.
tsp = TimeSequencePredictor(name="nyc_taxi_1next",
logs_dir="~/zoo_automl_logs",
dt_col="timestamp",
target_col="value",
extra_features_col=None)
from zoo import init_spark_on_local
from zoo.ray import RayContext
sc = init_spark_on_local(cores=4)
ray_ctx = RayContext(sc=sc, object_store_memory="1g")
ray_ctx.init()
%%time
from zoo.chronos.autots.deprecated.config.recipe import LSTMGridRandomRecipe
# fit train_df and validate with val_df, return the best trial as pipeline.
# the default recipe is SmokeRecipe,which runs one epoch and one iteration with only 1 random sample.
# you can change recipe by define `recipe` in `fit`. The recipes you can choose are SmokeRecipe, RandomRecipe, LSTMGridRandomRecipe, GridRandomRecipe and BayesRecipe.
look_back_single = 5
pipeline = tsp.fit(train_df,
validation_df=val_df,
metric="mse",
recipe=LSTMGridRandomRecipe(
num_rand_samples=1,
epochs=2,
look_back=look_back_single,
batch_size=[64]))
print("Training completed.")
#%load_ext tensorboard
#%tensorboard --logdir <logs_dir>/<job_name>_leaderboard/
# predict test_df with the best trial
pred_df = pipeline.predict(test_df)
pred_df.head(5)
# prediction value start from look_back_single
test_df[look_back_single:look_back_single+5]
# plot the predicted values and actual values
plot_result(test_df, pred_df,past_seq_len=look_back_single)
# evaluate test_df
mse, smape = pipeline.evaluate(test_df, metrics=["mse", "smape"])
print("Evaluate: the mean square error is", mse)
print("Evaluate: the smape value is", smape)
# save the pipeline with best trial
pipeline.save("/tmp/saved_pipeline/my.ppl")
from zoo.chronos.autots.deprecated.pipeline.time_sequence import load_ts_pipeline
new_pipeline = load_ts_pipeline("/tmp/saved_pipeline/my.ppl")
# you can do predict and evaluate again
# we use test_df as input in order to compare results before and after restoration
new_pred = new_pipeline.predict(test_df)
new_pred.head(5)
# evaluate test_df
mse, smape = new_pipeline.evaluate(test_df, metrics=["mse", "smape"])
print("Evaluate: the mean square error is", mse)
print("Evaluate: the smape value is", smape)
# review the initialization infomation if needed
new_pipeline.describe()
# Use val_df as incremental data
new_pipeline.fit(val_df,epoch_num=5)
# predict results of test_df
new_pred_df = new_pipeline.predict(test_df)
plot_result(test_df, new_pred_df,past_seq_len = look_back_single)
# evaluate test_df
mse, smape = new_pipeline.evaluate(test_df, metrics=["mse", "smape"])
print("Evaluate: the mean square error is", mse)
print("Evaluate: the smape value is", smape)
# build time sequence predictor
from zoo.chronos.autots.deprecated.regression.time_sequence_predictor import TimeSequencePredictor
# change future_seq_len into the step you want to forcast.
tsp = TimeSequencePredictor(name="nyc_taxi_10next",
logs_dir="~/zoo_automl_logs",
future_seq_len=5,
dt_col="timestamp",
target_col="value",
extra_features_col=None)
%%time
# you can specify the look back sequence length with a single number or a range of (min_len, max_len) in recipe.
look_back_multi = 10
pipeline = tsp.fit(train_df,
validation_df=val_df,
metric="mse",
recipe=LSTMGridRandomRecipe(
num_rand_samples=3,
epochs=2,
look_back=10,
training_iteration=look_back_multi,
batch_size=[64]))
print("Training completed.")
# test
# predict test_df with the best trial
pred_df = pipeline.predict(test_df)
pred_df.head(5)
# plot multi step predicted values and actual values
# plot at most five step predict values for better view
plot_less_five_step_result(test_df, pred_df,past_seq_len=look_back_multi)
# plot only the first and the last step predict values and actual values
plot_first_last_step_result(test_df, pred_df, past_seq_len=look_back_multi)
# evaluate test_df
mse, smape = pipeline.evaluate(test_df, metrics=["mse", "smape"])
print("Evaluate: the mean square error is", mse)
print("Evaluate: the smape value is", smape)
ray_ctx.stop()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 0. Helper function definations
Step2: 1. load data
Step3: Now we download the dataset and load it into a pandas dataframe.
Step4: Below are some example records of the data
Step5: Convert string timestamp to TimeStamp
Step6: You can use train_val_test_split to split the whole dataset into train/val/test sets. There will be two columns in the output dataframe
Step7: 2. Train and validation
Step8: We provided a leaderboard visualization tool based on tensorboard. You can install tensorboard and run the following code to view the hparams tag in tensorboard. <br>
Step9: 3. Test
Step10: 4. save and restore
Step11: 5. continue training
Step12: 6. multi step forecasting
|
7,179
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import sympy as sp
from devito import *
#NBVAL_IGNORE_OUTPUT
from examples.seismic import Model, plot_velocity
shape = (301, 501) # Number of grid point (nx, ny, nz)
spacing = (10., 10) # Grid spacing in m. The domain size is now 3km by 5km
origin = (0., 0) # What is the location of the top left corner.
# Define a velocity profile. The velocity is in km/s
v = np.empty(shape, dtype=np.float32)
v[:,:100] = 1.5
v[:,100:350] = 2.5
v[:,350:] = 4.5
# With the velocity and model size defined, we can create the seismic model that
# encapsulates these properties. We also define the size of the absorbing layer as 10 grid points
model = Model(vp=v, origin=origin, shape=shape, spacing=spacing, space_order=4, nbl=40, bcs="damp")
plot_velocity(model)
from examples.seismic import TimeAxis
t0 = 0. # Simulation starts a t=0
tn = 2400. # Simulation last 2.4 second (2400 ms)
dt = model.critical_dt # Time step from model grid spacing
time_range = TimeAxis(start=t0, stop=tn, step=dt)
nrcv = 250 # Number of Receivers
#NBVAL_IGNORE_OUTPUT
from examples.seismic import RickerSource
f0 = 0.010 # Source peak frequency is 10Hz (0.010 kHz)
src = RickerSource(name='src', grid=model.grid, f0=f0,
npoint=1, time_range=time_range)
# Define the wavefield with the size of the model and the time dimension
u = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=4)
# We can now write the PDE
pde = model.m * u.dt2 - u.laplace + model.damp * u.dt
stencil = Eq(u.forward, solve(pde, u.forward))
src.coordinates.data[:, 0] = 400 # Source coordinates
src.coordinates.data[:, -1] = 20. # Depth is 20m
#NBVAL_IGNORE_OUTPUT
from examples.seismic import Receiver
rec = Receiver(name='rec', grid=model.grid, npoint=nrcv, time_range=time_range)
rec.coordinates.data[:,0] = np.linspace(src.coordinates.data[0, 0], model.domain_size[0], num=nrcv)
rec.coordinates.data[:,-1] = 20. # Depth is 20m
# Finally we define the source injection and receiver read function to generate the corresponding code
src_term = src.inject(field=u.forward, expr=src * dt**2 / model.m)
# Create interpolation expression for receivers
rec_term = rec.interpolate(expr=u.forward)
op = Operator([stencil] + src_term + rec_term, subs=model.spacing_map)
op(time=time_range.num-1, dt=model.critical_dt)
offset = []
data = []
for i, coord in enumerate(rec.coordinates.data):
off = (src.coordinates.data[0, 0] - coord[0])
offset.append(off)
data.append(rec.data[:,i])
#NBVAL_IGNORE_OUTPUT
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
mpl.rc('font', size=16)
mpl.rc('figure', figsize=(8, 6))
def plot_traces(rec, xb, xe, t0, tn, colorbar=True):
    """Display a seismic shot record as a clipped grayscale image.

    :param rec: 2D array of traces (time samples x receivers).
    :param xb, xe: first/last receiver x positions (km, per the callers).
    :param t0, tn: start/end time in ms; the axis shows seconds.
    :param colorbar: attach a colorbar aligned with the image.
    """
    clip = np.max(rec)/100
    extent = [xb, xe, 1e-3*tn, t0]
    image = plt.imshow(rec, cmap=cm.gray, vmin=-clip, vmax=clip, extent=extent)
    plt.xlabel('X position (km)')
    plt.ylabel('Time (s)')
    if colorbar:
        # Create aligned colorbar on the right
        host = plt.gca()
        cax = make_axes_locatable(host).append_axes("right", size="5%", pad=0.05)
        plt.colorbar(image, cax=cax)
    plt.show()
plot_traces(np.transpose(data), rec.coordinates.data[0][0]/1000, rec.coordinates.data[nrcv-1][0]/1000, t0, tn)
ns = time_range.num # Number of samples in each trace
grid = Grid(shape=(ns, nrcv)) # Construction of grid with samples X traces dimension
vnmo = 1500
vguide = SparseFunction(name='v', grid=grid, npoint=ns)
vguide.data[:] = vnmo
off = SparseFunction(name='off', grid=grid, npoint=nrcv)
off.data[:] = offset
amps = SparseFunction(name='amps', grid=grid, npoint=ns*nrcv, dimensions=grid.dimensions, shape=grid.shape)
amps.data[:] = np.transpose(data)
sample, trace = grid.dimensions
t_0 = SparseFunction(name='t0', grid=grid, npoint=ns, dimensions=[sample], shape=[grid.shape[0]])
tt = SparseFunction(name='tt', grid=grid, npoint=ns*nrcv, dimensions=grid.dimensions, shape=grid.shape)
snmo = SparseFunction(name='snmo', grid=grid, npoint=ns*nrcv, dimensions=grid.dimensions, shape=grid.shape)
s = SparseFunction(name='s', grid=grid, dtype=np.intc, npoint=ns*nrcv, dimensions=grid.dimensions,
shape=grid.shape)
#NBVAL_IGNORE_OUTPUT
dtms = model.critical_dt/1000 # Time discretization in ms
E1 = Eq(t_0, sample*dtms)
E2 = Eq(tt, sp.sqrt(t_0**2 + (off[trace]**2)/(vguide[sample]**2) ))
E3 = Eq(s, sp.floor(tt/dtms))
op1 = Operator([E1, E2, E3])
op1()
#NBVAL_IGNORE_OUTPUT
s.data[s.data >= time_range.num] = 0
E4 = Eq(snmo, amps[s[sample, trace], trace])
op2 = Operator([E4])
op2()
stack = snmo.data.sum(axis=1) # We can stack traces and create a ZO section!!!
plot_traces(snmo.data, rec.coordinates.data[0][0]/1000, rec.coordinates.data[nrcv-1][0]/1000, t0, tn)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will create a simple velocity model here by hand for demonstration purposes. This model essentially consists of three layers, each with a different velocity
Step2: Next we define the positioning and the wave signal of our source, as well as the location of our receivers. To generate the wavelet for our sources we require the discretized values of time that we are going to use to model a multiple "shot", which depends on the grid spacing used in our model. We will use one source and eleven receivers. The source is located in the position (550, 20). The receivers start at (550, 20) with an even horizontal spacing of 100m at consistent depth.
Step3: How we are modelling a horizontal layers, we will group this traces and made a NMO correction using this set traces.
Step4: Auxiliary function for plotting traces
Step5: Common Midpoint Gather
Step6: NMO Correction
Step7: In this example we will use a constant velocity guide. The guide will be arranged in a SparseFunction with the number of points equal to number of samples in the traces.
Step8: The computed offset for each trace will be arraged in another SparseFunction with number of points equal to number of traces.
Step9: The previous modelled traces will be arranged in a SparseFunction with the same dimensions as the grid.
Step10: Now, we define SparseFunctions with the same dimensions as the grid, describing the NMO traveltime equation. The $t_0$ SparseFunction isn't offset dependent, so the number of points is equal to the number of samples.
Step11: The Equation relates traveltimes
Step12: With the computed samples, we remove all that are out of the samples range, and shift the amplitude for the correct sample.
|
7,180
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
from bigmali.grid import Grid
from bigmali.prior import TinkerPrior
from bigmali.hyperparameter import get
import numpy as np
from scipy.stats import lognorm
from numpy.random import normal
#globals that functions rely on
grid = Grid()
prior = TinkerPrior(grid)
a_seed = get()[:-1]
S_seed = get()[-1]
mass_points = prior.fetch(grid.snap(0)).mass[2:-2] # cut edges
tmp = np.loadtxt('/Users/user/Code/PanglossNotebooks/MassLuminosityProject/SummerResearch/mass_mapping.txt')
z_data = tmp[:,0]
lobs_data = tmp[:,1]
mass_data = tmp[:,2]
ra_data = tmp[:,3]
dec_data = tmp[:,4]
sigobs = 0.05
def fast_lognormal(mu, sigma, x):
    """Lognormal pdf at x, parameterized by median `mu` and log-space width `sigma`."""
    norm_const = 1 / (x * sigma * np.sqrt(2 * np.pi))
    z = (np.log(x) - np.log(mu)) / sigma
    return norm_const * np.exp(-0.5 * z ** 2)
def p1_eval(zk):
    """Halo-mass prior probabilities at redshift `zk`, with grid edges trimmed
    to match `mass_points`."""
    snapped_z = grid.snap(zk)
    return prior.fetch(snapped_z).prob[2:-2]
def p2_samp(nas=100):
    """Draw `nas` samples of the scatter hyperparameter S.

    a is fixed on the hyperseed; S is drawn from a normal distribution
    centered at the hyperseed with 10% relative width.
    """
    return normal(S_seed, S_seed / 10, size=nas)
def p3_samp(mk, a, S, zk, nl=100):
    """Draw `nl` lognormal luminosity samples for a halo of mass `mk` at redshift `zk`.

    The lognormal median follows a power-law mass-luminosity relation with
    hyperparameters a = (log-amplitude, slope, pivot mass, redshift exponent)
    -- presumably; confirm against the bigmali hyperparameter definition.
    """
    amp, slope, pivot, z_exp = a[0], a[1], a[2], a[3]
    mu_lum = np.exp(amp) * ((mk / pivot) ** slope) * ((1 + zk) ** (z_exp))
    return lognorm(S, scale=mu_lum).rvs(nl)
def p4_eval(lobsk, lk, sigobs):
    """Likelihood density of observed luminosity `lobsk` given true luminosity `lk`
    and observational scatter `sigobs` (lognormal noise model)."""
    density = fast_lognormal(lk, sigobs, lobsk)
    return density
def f(a, S, zk, lobsk, nl=100):
    """Monte-Carlo estimate of the luminosity likelihood over the mass grid.

    For each grid mass, averages p4_eval over `nl` luminosity draws from
    p3_samp; returns one averaged likelihood per entry of `mass_points`.
    """
    densities = []
    for mk in mass_points:
        running = 0
        for lum in p3_samp(mk, a, S, zk, nl):
            running += p4_eval(lobsk, lum, sigobs)
        densities.append(running / nl)
    return densities
def mass_dist(ind=1, nas=10, nl=100):
    """Posterior mass distribution for galaxy `ind`, marginalized over S draws.

    Averages the likelihood f over `nas` samples of S, multiplies by the
    mass prior, and normalizes by trapezoidal integration over `mass_points`.
    """
    lobs_k = lobs_data[ind]
    z_k = z_data[ind]
    accumulated = np.zeros(len(mass_points))
    for s_draw in p2_samp(nas):
        accumulated += f(a_seed, s_draw, z_k, lobs_k, nl)
    unnormalized = p1_eval(z_k) * accumulated / nas
    return unnormalized / np.trapz(unnormalized, x=mass_points)
plt.subplot(3,3,1)
dist = p1_eval(zk)
plt.plot(mass_points, dist)
plt.gca().set_xscale('log')
plt.gca().set_yscale('log')
plt.ylim([10**-25, 10])
plt.xlim([mass_points.min(), mass_points.max()])
plt.title('Prior')
plt.xlabel(r'Mass $(M_\odot)$')
plt.ylabel('Density')
for ind in range(2,9):
plt.subplot(3,3,ind)
dist = mass_dist(ind)
plt.plot(mass_points, dist, alpha=0.6, linewidth=2)
plt.xlim([mass_points.min(), mass_points.max()])
plt.gca().set_xscale('log')
plt.gca().set_yscale('log')
plt.ylim([10**-25, 10])
plt.gca().axvline(mass_data[ind], color='red', linewidth=2, alpha=0.6)
plt.title('Mass Distribution')
plt.xlabel(r'Mass $(M_\odot)$')
plt.ylabel('Density')
# most massive
ind = np.argmax(mass_data)
plt.subplot(3,3,9)
dist = mass_dist(ind)
plt.plot(mass_points, dist, alpha=0.6, linewidth=2)
plt.gca().set_xscale('log')
plt.gca().set_yscale('log')
plt.xlim([mass_points.min(), mass_points.max()])
plt.ylim([10**-25, 10])
plt.gca().axvline(mass_data[ind], color='red', linewidth=2, alpha=0.6)
plt.title('Mass Distribution')
plt.xlabel(r'Mass $(M_\odot)$')
plt.ylabel('Density')
# plt.tight_layout()
plt.gcf().set_size_inches((10,6))
index = range(2,9) + [np.argmax(mass_data)]
plt.title('Simple Sketch of Field of View')
plt.scatter(ra_data[index], dec_data[index] , s=np.log(mass_data[index]), alpha=0.6)
plt.xlabel('ra')
plt.ylabel('dec');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Probability Functions
Step3: Results
Step4: Turning into Probabilistic Catalogue
|
7,181
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
from pyquickhelper.helpgen import NbImage
NbImage("images/dicho.png")
def recherche_dichotomique(element, liste_triee):
a = 0
b = len(liste_triee)-1
m = (a+b)//2
while a < b :
if liste_triee[m] == element:
return m
elif liste_triee[m] > element:
b = m-1
else :
a = m+1
m = (a+b)//2
return a
li = [0, 4, 5, 19, 100, 200, 450, 999]
recherche_dichotomique(5, li)
def recherche_dichotomique_recursive(element, liste_triee, a=0, b=None):
    """Recursive binary search over liste_triee[a..b] (inclusive bounds).

    Returns the index of *element* when it is found in the explored slice,
    otherwise the index at which the search bottomed out.

    Bug fixed: the original used ``b = -1`` as the "bound not supplied"
    sentinel, but the recursion itself legitimately produces
    ``b = m - 1 == -1`` when the element is smaller than every list item,
    which re-triggered the sentinel and caused infinite recursion.  ``None``
    is now the sentinel, and the ``a >= b`` guard also covers the empty
    range ``a > b``.
    """
    if b is None:
        b = len(liste_triee) - 1
    if a >= b:
        return a
    m = (a + b) // 2
    if liste_triee[m] == element:
        return m
    elif liste_triee[m] > element:
        return recherche_dichotomique_recursive(element, liste_triee, a, m - 1)
    else:
        return recherche_dichotomique_recursive(element, liste_triee, m + 1, b)
recherche_dichotomique(5, li)
def recherche_dichotomique_recursive2(element, liste_triee):
    """Recursive binary search that slices the list instead of passing bounds.

    Returns the index of *element* in *liste_triee* when present; for a
    missing element the result is only the position the search ended at.
    Note that each slice copies, so this variant does O(n) extra work.
    """
    if len(liste_triee) == 1:
        return 0
    milieu = len(liste_triee) // 2
    pivot = liste_triee[milieu]
    if pivot == element:
        return milieu
    if pivot > element:
        # Element (if any) is in the left half; indices are unchanged there.
        return recherche_dichotomique_recursive2(element, liste_triee[:milieu])
    # Element is in the right half; shift the recursive result back up.
    return milieu + recherche_dichotomique_recursive2(element, liste_triee[milieu:])
recherche_dichotomique(5, li)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lorsqu'on décrit n'importe quel algorithme, on évoque toujours son coût, souvent une formule de ce style
Step2: Version itérative
Step3: Version récursive
Step4: Version récursive 2
|
7,182
|
<ASSISTANT_TASK:>
Python Code:
# NOTE(review): this first cell references simplify/diff/x/n before the
# star-import below -- it presumably re-ran after the imports in the live
# notebook; as ordered here it would raise NameError.
simplify(diff(x**n,x))
from sympy import *
init_printing()
x,n = symbols('x n')
# A small catalogue of elementary functions and their derivatives.
funkcije = [1,x**n,sin(x),cos(x), exp(x),log(x)]
tabela = [[f,diff(f,x)] for f in funkcije]
tabela
from pandas import DataFrame
DataFrame(tabela,columns={"$f(x)$","$f'(x)$"})
# for a nicer display, use the latex function
tabela =[['$$%s$$' % latex(f),'$$%s$$'% latex(diff(f,x))] for f in funkcije ]
DataFrame(tabela,columns={"funkcija $f(x)$","odvod $f'(x)$"})
%%javascript
MathJax.Hub.Config({
"HTML-CSS": { linebreaks: {automatic: false } }
});
// prevent line breaks in the table
# Differentiation rules for combinations of two symbolic functions f and g.
f,g = symbols("f,g")
import pandas as pd
pd.options.display.max_colwidth=1000
operacije = [f(x)+g(x), f(x)*g(x), f(x)/g(x), f(x)**g(x),f(g(x))]
fmt = "$$%s$$"
tabela = [[fmt % latex(op),fmt % latex(simplify(diff(op,x)))] for op in operacije]
df_tabela = DataFrame(tabela, columns=["funkcija $f(x)$","pravilo za odvod $f'(x)$"],)
df_tabela
import disqus
%reload_ext disqus
%disqus matpy
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lepši izpis tabele dobimo, če uporabimo knjižnico za delo s tabelami in podatki Pandas.
Step2: Pravila za odvajanje
Step3: Naloga
|
7,183
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy import integrate
def trapz(f, a, b, N):
    """Composite trapezoidal rule.

    Approximates the integral of ``f`` over ``[a, b]`` using ``N``
    equal-width subintervals (``N + 1`` sample points).

    Fixes vs. the original:
    * the docstring had lost its quotes (bare text was a syntax error);
    * the midpoint average skipped the first subinterval
      (``y[1:-1]`` was paired with ``y[2:]`` instead of ``y[:-1]`` with
      ``y[1:]``);
    * the step was read as ``np.diff(x)[1]``, which raises for ``N == 1``.
    """
    x = np.linspace(a, b, N + 1)
    h = (b - a) / float(N)              # uniform step width
    y = f(x)
    # Average of each pair of neighbouring samples, one per subinterval.
    mids = 0.5 * (y[:-1] + y[1:])
    return np.sum(h * mids)
np.linspace(0,1,2)
# Define the test integrands first -- the original called trapz(f, ...)
# one line before f was bound, which raised NameError.
f = lambda x: x**2
g = lambda x: np.sin(x)
trapz(f, 0, 1, 1000)
# Check the trapezoidal results against the known reference values.
I = trapz(f, 0, 1, 1000)
assert np.allclose(I, 0.33333349999999995)
J = trapz(g, 0, np.pi, 1000)
assert np.allclose(J, 1.9999983550656628)
# Compare against scipy's adaptive quadrature; differences should be tiny.
integrate.quad(f,0,1)[0] - trapz(f, 0, 1, 1000)
integrate.quad(g,0, np.pi)[0] - trapz(g,0,np.pi, 1000)
assert True # leave this cell to grade the previous one
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Trapezoidal rule
Step3: Now use scipy.integrate.quad to integrate the f and g functions and see how the result compares with your trapz function. Print the results and errors.
|
7,184
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Tal Linzen <linzen@nyu.edu>
# Denis A. Engemann <denis.engemann@gmail.com>
# Jona Sassenhagen <jona.sassenhagen@gmail.com>
#
# License: BSD (3-clause)
import pandas as pd
import mne
from mne.stats import linear_regression, fdr_correction
from mne.viz import plot_compare_evokeds
from mne.datasets import kiloword
# Load the data
path = kiloword.data_path() + '/kword_metadata-epo.fif'
epochs = mne.read_epochs(path)
print(epochs.metadata.head())
name = "Concreteness"
df = epochs.metadata
# Bin the continuous Concreteness rating into 11 levels, rescaled to 0..1.
df[name] = pd.cut(df[name], 11, labels=False) / 10
colors = {str(val): val for val in df[name].unique()}
epochs.metadata = df.assign(Intercept=1)  # Add an intercept for later
# One averaged evoked response per Concreteness level.
evokeds = {val: epochs[name + " == " + val].average() for val in colors}
plot_compare_evokeds(evokeds, colors=colors, split_legend=True,
                     cmap=(name + " Percentile", "viridis"))
# Regress the single-trial EEG on the intercept plus Concreteness.
names = ["Intercept", name]
res = linear_regression(epochs, epochs.metadata[names], names=names)
for cond in names:
    res[cond].beta.plot_joint(title=cond, ts_args=dict(time_unit='s'),
                              topomap_args=dict(time_unit='s'))
# FDR-correct the p-values and mask the beta image by significance.
reject_H0, fdr_pvals = fdr_correction(res["Concreteness"].p_val.data)
evoked = res["Concreteness"].beta
evoked.plot_image(mask=reject_H0, time_unit='s')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Psycholinguistically relevant word characteristics are continuous. I.e.,
Step2: We observe that there appears to be a monotonic dependence of EEG on
Step3: Because the
|
7,185
|
<ASSISTANT_TASK:>
Python Code:
# Fixture 0: TimeDistributed wrapping a Dense(4) layer on (3, 6) input.
data_in_shape = (3, 6)
layer_0 = Input(shape=data_in_shape)
layer_1 = TimeDistributed(Dense(4))(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
    np.random.seed(4000 + i)
    weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
weight_names = ['W', 'b']
for w_i, w_name in enumerate(weight_names):
    print('{} shape:'.format(w_name), weights[w_i].shape)
    print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))
# Random input in [-1, 1); record the model's output as the expected value.
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['wrappers.TimeDistributed.0'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# Fixture 1: TimeDistributed wrapping a Conv2D(6, 3x3) on (5, 4, 4, 2) input.
data_in_shape = (5, 4, 4, 2)
layer_0 = Input(shape=data_in_shape)
layer_1 = TimeDistributed(Conv2D(6, (3,3), data_format='channels_last'))(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
    np.random.seed(4010 + i)
    weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
weight_names = ['W', 'b']
for w_i, w_name in enumerate(weight_names):
    print('{} shape:'.format(w_name), weights[w_i].shape)
    print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['wrappers.TimeDistributed.1'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# Emit all collected fixtures as JSON for the Keras.js test suite.
print(json.dumps(DATA))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: [wrappers.TimeDistributed.1] wrap a Conv2D layer with 6 3x3 filters (input
Step2: export for Keras.js tests
|
7,186
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import numpy.ma as ma
from scipy.integrate import odeint
mag = lambda r: np.sqrt(np.sum(np.power(r, 2)))
def g(y, t, q, m, n, d, k):
    """Right-hand side of the N-body Coulomb ODE system for scipy.odeint.

    Parameters
    ----------
    y : flat array of length ``n*d*2``; ``y[:n*d]`` holds the particle
        positions and ``y[n*d:]`` the velocities, each viewed as (n, d).
    t : current time (unused -- the force law is autonomous).
    q : length-n array of particle charges.
    m : length-n array of particle masses.
    n : number of particles.
    d : number of spatial dimensions.
    k : Coulomb constant.

    Returns the flattened time derivative: d(position)/dt = velocity and
    d(velocity)/dt = acceleration from the pairwise Coulomb forces

        a_i = sum_{j != i} k * q_i * q_j * (r_i - r_j) / (m_i |r_i - r_j|^3).

    Fixes vs. the original: the docstring had lost its quotes (syntax
    error); the charge products, the constant ``k`` and the masses were
    computed but never applied; the force magnitudes were summed over j
    *before* being multiplied by the direction vectors (not the vector sum
    of pairwise forces); and the caller's ``y`` was mutated in place.
    For the notebook's run (all q = m = k = 1) the magnitudes coincide.
    """
    state = y.reshape((n * 2, d))
    pos = state[:n]
    vel = state[n:]
    # dr[i, j] = r_i - r_j: the force on i from j points along +dr[i, j].
    rs_from = np.tile(pos, (n, 1, 1))          # rs_from[i, j] = r_j
    dr = np.transpose(rs_from, axes=(1, 0, 2)) - rs_from
    # Pairwise distances; mask the diagonal to avoid dividing by zero.
    diag = np.eye(n, dtype=bool).reshape((n, n, 1))
    drmag = ma.array(
        np.sqrt(np.sum(np.power(dr, 2), 2)).reshape((n, n, 1)),
        mask=diag)
    # Pairwise charge products q_i * q_j.
    qprod = (q[:, np.newaxis] * q[np.newaxis, :]).reshape((n, n, 1))
    # Vector Coulomb force on i from j, then vector-summed over j
    # (masked diagonal entries are ignored by the masked-array sum).
    forces = k * qprod * dr / np.power(drmag, 3)
    accel = ma.filled(np.sum(forces, 1), 0.0) / m[:, np.newaxis]
    # Assemble the derivative without touching the caller's array.
    deriv = np.empty_like(state)
    deriv[:n] = vel
    deriv[n:] = accel
    return deriv.reshape(n * d * 2)
# NOTE(review): t_f is the number of time samples, not the final time --
# the integration runs over [0, 20] with 10 output points.
t_f = 10
t = np.linspace(0, 20, num=t_f)
# Number of dimensions
d = 2
# Number of point charges
n = 3
# charge magnitudes, currently all equal to 1
q = np.ones(n)
# masses
m = np.ones(n)
# The electric constant
# k=1/(4*pi*epsilon_naught)
# Right now we will set it to 1
# because for our tests we are choosing all q_i =1.
# Therefore, k*q is too large a number and causes
# roundoff errors in the integrator.
# In truth:
# k = 8.99*10**9
# But for now:
k=1.
# Initial positions (r*i) and velocities (dr*dti) chosen so the three
# particles approach one another.
r1i = np.array([-2., 0.5])
dr1dti = np.array([2.,0.])
r2i = np.array([30.,0.])
dr2dti = np.array([-2., 0.])
r3i = np.array([16.,16.])
dr3dti = np.array([0, -2.])
# Pack as [positions..., velocities...] flattened, matching g's layout.
y0 = np.array([r1i, r2i, r3i, dr1dti, dr2dti, dr3dti]).flatten()
# Doing the integration
yf = odeint(g, y0, t, args=(q,m,n,d,k)).reshape(t_f,n*2,d)
%matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
ax = fig.add_subplot(111)
# Extract per-particle x/y trajectories from the (time, particle, dim) cube.
ys1 = yf[:,0,1]
xs1 = yf[:,0,0]
xs2 = yf[:,1,0]
ys2 = yf[:,1,1]
xs3 = yf[:,2,0]
ys3 = yf[:,2,1]
# Blue triangles mark start points, red triangles the end points.
ax.plot(xs1[:1], ys1[:1],'bv')
ax.plot(xs1[-1:], ys1[-1:], 'rv')
ax.plot(xs2[:1], ys2[:1], 'bv')
ax.plot(xs2[-1:], ys2[-1:], 'rv')
ax.plot(xs3[:1], ys3[:1], 'bv')
ax.plot(xs3[-1:], ys3[-1:], 'rv')
# minx = np.min(y[:,[0,2],0])
# maxx = np.max(y[:,[0,2],0])
# miny = np.min(y[:,[0,2],1])
# maxy = np.max(y[:,[0,2],1])
ax.plot(xs1, ys1)
ax.plot(xs2, ys2)
ax.plot(xs3, ys3)
# plt.xlim(xmin=minx, xmax=maxx)
# plt.ylim(ymin=miny, ymax=maxy)
plt.title("Paths of 3 Colliding Electric Particles")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Point Charge Dynamics
Step2: Let's define our time intervals, so that odeint knows which time stamps to iterate over.
Step3: Some other constants
Step4: We get to choose the initial positions and velocities of our particles. For our initial tests, we'll set up 3 particles to collide with eachother.
Step5: And pack them into an initial state variable we can pass to odeint.
Step6: The Fun Part – Doing the Integration
|
7,187
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# Import the necessary libraries
import csv, os
from shapely.geometry import Point, mapping
import fiona, shapely
from fiona import Collection
import numpy as np
print "fiona version: {}".format(fiona.__version__)
print "shapely version: {}".format(shapely.__version__)
print "gdal version: {}".format(fiona.__gdal_version__)
print "numpy version: {}".format(np.__version__)
# Assign file_path
pth = "/mnt/hgfs/shared_ubuntu/APL/OOI/OOI_ipynb/"
fname = 'Nanoos.gpkg'
fcsv = "OOI_Assets.csv"
with open(os.path.join(pth,fcsv),'rb') as f:
reader = csv.DictReader(f)
for row in reader:
print row # Notice that numbers are strings in this case
with open(os.path.join(pth,fcsv), 'rb') as f:
reader = csv.DictReader(f)
for row in reader:
point = Point(float(row['Lon']),float(row['Lat']))
print point
import pandas as pd
from geopandas import GeoDataFrame
from shapely.geometry import Point
import matplotlib.pyplot as plt
import geopandas as gpd
import pyproj
print "geopandas version: {}".format(gpd.__version__)
# Test reading geopackage
geopackage = gpd.read_file(os.path.join(pth,fname))
geopackage.head(2)
df = pd.read_csv(os.path.join(pth,fcsv))
# Assign CRS, retrieved from epsg.io, the example below is EPSG:4326
crs = 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]'
geometry = [Point(xy) for xy in zip(df.Lon, df.Lat)]
geo_df = GeoDataFrame(df, crs=crs, geometry=geometry)
print "Original Column Header: {}\n".format(geo_df.columns.values)
# Renamed the problematic keys
renamed = geo_df.rename(columns={'Provider URL':'Provider_URL',
'Provider':'Provider',
'Provider Type':'Provider_Type',
'State / Province':'State_or_Province'})
print "Renamed Column Header: {}".format(renamed.columns.values)
# Removing the problematic keys
# Problematic keys can either be renamed or removed.
# package = geo_df.drop(geo_df.columns[[8,9,10,11]],axis=1)
# print package.columns.values
# Write the renamed geodataframe to a geopackage
renamed.to_file('OOI_Assets.gpkg',driver='GPKG')
# Check if the geopackage was written correctly
test = gpd.read_file('OOI_Assets.gpkg')
test
# Import the Catalog module
from geoserver.catalog import Catalog
# Import subprocess to use cURL REST API since gsconfig, doesn't seem to have this capability anymore
import subprocess
# Retrieve catalog from Geoserver Instance via REST (REpresentational State Transfer)
cat = Catalog("http://data.nanoos.org/geoserver2_8/rest", username='####', password='####')
# Get list of workspaces
print cat.get_workspaces()
# Get workspace
nvs = cat.get_workspace('nvs_assets')
print nvs.name
# Create the geopackage datastore
gpkg_ds = cat.create_datastore('OOI_Assets', workspace=nvs)
# Edit the connection parameters
gpkg_ds.connection_parameters = {'Connection timeout': '20',
'Evictor run periodicity': '300',
'Evictor tests per run': '3',
'Expose primary keys': 'false',
'Max connection idle time': '300',
'Test while idle': 'true',
'database': 'file:data/geopackages/OOI_Assets.gpkg', # Point to location of geopackage relative to the geoserver data directory
'dbtype': 'geopkg',
'fetch size': '1000',
'max connections': '10',
'min connections': '1',
'namespace': 'http://data.nanoos.org/geoserver2_8/nvs_assets', # Workspace URL
'validate connections': 'true'}
# Save datastore
cat.save(gpkg_ds)
# Set necessary variables for cURL
data_name = 'OOI_Assets'
wksp_name = nvs.name
ds_name = gpkg_ds.name
print ds_name
# Create layer from geopackage table
subprocess.call('curl -v -u ####:#### -XPOST -H "Content-type: text/xml" -d "<featureType><name>{0}</name></featureType>" http://data.nanoos.org/geoserver2_8/rest/workspaces/{1}/datastores/{2}/featuretypes'.format(data_name,wksp_name,ds_name), shell=True)
# get the newly published layer w/o any projection
layer = cat.get_layer(data_name)
# retrieve resource to assign projection
rsrc = layer.resource
# assign Layer projection
rsrc.projection = 'EPSG:4326'
# save layer
cat.save(rsrc)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading csv and printing as dictionary
Step2: Use shapely to make points
Step3: Geopandas reading a geopackage
Step4: Write geopandas dataframe to geopackage
Step5: Uploading Geopackage to GeoServer
|
7,188
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage.filters as scnf
import sys
# Add a new path with needed .py files.
sys.path.insert(0, 'C:\Users\Dominik\Documents\GitRep\kt-2015-DSPHandsOn\MedianFilter\Python')
import gitInformation
gitInformation.printInformation()
% matplotlib inline
# Creates a sine wave with wave number 5.
data = np.fromfunction(lambda x: np.sin(x/1024*2*np.pi*5), (1024,))
# Creates an array with random values and the same length as the data array.
noise = np.random.normal(0,1.0,len(data))
signal = data + noise
signal1 = signal
plt.plot(data)
plt.plot(signal1)
plt.title("Noised sine wave")
# Moving averege filter with a length of 30, 5 times calculated.
for i in range (0,5):
signal = np.convolve(signal, np.ones(30)/30, mode = 'same')
plt.plot(signal)
plt.title("Result of the moving averege filter")
# Gaussian filter of the noised signal with a standard deviation for Gaussian kernel of 30.
smoothed = scnf.gaussian_filter(signal1, 30)
plt.plot(smoothed)
plt.title("Result of the gaussian filter")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I am trying to remove white noise from the original wave with different filters.
Step2: Smooth the signal with a moving averege filter.
Step3: Smooth the signal with a gaussian filter.
|
7,189
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('../input')
from flight_revenue_simulator import simulate_revenue, score_me
def pricing_function(days_left, tickets_left, demand_level):
    """Sample pricing function: price 10 below the day's demand level.

    (Restored the docstring quotes -- the original bare summary line was a
    syntax error.  Presumably the simulator sells roughly
    ``demand_level - price`` tickets per day -- confirm against the
    simulator's rules.)
    """
    price = demand_level - 10
    return price
simulate_revenue(days_left=7, tickets_left=50, pricing_function=pricing_function, verbose=True)
score_me(pricing_function)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: In case you want to check your understanding of the simulator logic, here is a simplified version of some of the key logic (leaving out the code that prints your progress). If you feel you understand the description above, you can skip reading this code.
Step3: To see a small example of how your code works, test it with the following function
Step4: You can try simulations for a variety of values.
|
7,190
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.DataFrame([[1,2,3,4,5],[6,7,8,9,10],[11,12,13,14,15]],columns=['A','B','C','D','E'])
def g(df):
    """Flatten *df* into a single row whose columns are named ``<col>_<row+1>``.

    The row index is temporarily shifted to start at 1 so the generated
    labels are 1-based, then restored so the caller's frame is unchanged.
    """
    df.index = df.index + 1
    stacked = df.stack()
    df.index = df.index - 1
    # Build 'COL_ROW' labels from the stacked (row, column) MultiIndex.
    stacked.index = ['{}_{}'.format(col, row) for row, col in stacked.index]
    return stacked.to_frame().T
df = g(df.copy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,191
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2016 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# !pip install --upgrade google-api-python-client
import io, os, subprocess, sys, time, datetime, requests, itchat
from itchat.content import *
from googleapiclient.discovery import build
# Here I read in my own API_KEY from a file, which is not shared in Github repository:
# with io.open('../../API_KEY.txt') as fp:
# for line in fp: APIKEY = line
# You need to un-comment below line and replace 'APIKEY' variable with your own GCP API key:
APIKEY='AIzaSyCvxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
# Below is for GCP Language Tranlation API
service = build('translate', 'v2', developerKey=APIKEY)
# Import the base64 encoding library.
import base64
# Pass the image data to an encoding function.
def encode_image(image_file):
    """Read the file at path *image_file* and return its base64-encoded bytes.

    (Renamed the inner handle -- the original shadowed the ``image_file``
    path parameter with the open file object.)
    """
    with open(image_file, "rb") as fh:
        content = fh.read()
    return base64.b64encode(content)
# control parameter for Image API:
parm_image_maxResults = 10 # max objects or faces to be extracted from image analysis
# control parameter for Language Translation API:
parm_translation_origin_language = '' # original language in text: to be overwriten by TEXT_DETECTION
parm_translation_target_language = 'zh' # target language for translation: Chinese
# Running Vision API
# 'LABEL_DETECTION'
def KudosData_LABEL_DETECTION(image_base64, API_type, maxResults):
    """Run Vision API label (object) detection on a base64-encoded image
    and return a formatted reply listing each label with its score."""
    vision = build('vision', 'v1', developerKey=APIKEY)
    body = {
        'requests': [{
            'image': {"content": image_base64},
            'features': [{'type': API_type, 'maxResults': maxResults}],
        }],
    }
    responses = vision.images().annotate(body=body).execute(num_retries=3)
    reply = u'\n[ ' + API_type + u' 物体识别 ]\n'
    annotation = responses['responses'][0]
    if annotation != {}:
        for label in annotation['labelAnnotations']:
            reply += label['description'] \
                + '\n( confidence ' + str(label['score']) + ' )\n'
    else:
        reply += u'[ Nill 无结果 ]\n'
    return reply
# Running Vision API
# 'LANDMARK_DETECTION'
def KudosData_LANDMARK_DETECTION(image_base64, API_type, maxResults):
    """Run Vision API landmark detection on a base64-encoded image and
    return a formatted reply listing each landmark with its score."""
    vision = build('vision', 'v1', developerKey=APIKEY)
    body = {
        'requests': [{
            'image': {"content": image_base64},
            'features': [{'type': API_type, 'maxResults': maxResults}],
        }],
    }
    responses = vision.images().annotate(body=body).execute(num_retries=3)
    reply = u'\n[ ' + API_type + u' 地标识别 ]\n'
    annotation = responses['responses'][0]
    if annotation != {}:
        for landmark in annotation['landmarkAnnotations']:
            reply += landmark['description'] \
                + '\n( confidence ' + str(landmark['score']) + ' )\n'
    else:
        reply += u'[ Nill 无结果 ]\n'
    return reply
# Running Vision API
# 'LOGO_DETECTION'
def KudosData_LOGO_DETECTION(image_base64, API_type, maxResults):
    """Run Vision API logo detection on a base64-encoded image and return
    a formatted reply listing each detected logo with its score."""
    vision = build('vision', 'v1', developerKey=APIKEY)
    body = {
        'requests': [{
            'image': {"content": image_base64},
            'features': [{'type': API_type, 'maxResults': maxResults}],
        }],
    }
    responses = vision.images().annotate(body=body).execute(num_retries=3)
    reply = u'\n[ ' + API_type + u' 商标识别 ]\n'
    annotation = responses['responses'][0]
    if annotation != {}:
        for logo in annotation['logoAnnotations']:
            reply += logo['description'] \
                + '\n( confidence ' + str(logo['score']) + ' )\n'
    else:
        reply += u'[ Nill 无结果 ]\n'
    return reply
# Running Vision API
# 'TEXT_DETECTION'
def KudosData_TEXT_DETECTION(image_base64, API_type, maxResults):
    """Run Vision API OCR on a base64-encoded image; when the detected
    language differs from the global ``parm_translation_target_language``,
    append a machine translation of the extracted text (via the global
    ``service`` Translation client).

    Fix: the progress message said 'Compeleted'; corrected to 'Completed'.
    """
    vision = build('vision', 'v1', developerKey=APIKEY)
    body = {
        'requests': [{
            'image': {"content": image_base64},
            'features': [{'type': API_type, 'maxResults': maxResults}],
        }],
    }
    responses = vision.images().annotate(body=body).execute(num_retries=3)
    image_analysis_reply = u'\n[ ' + API_type + u' 文字提取 ]\n'
    if responses['responses'][0] != {}:
        # The first textAnnotation holds the whole extracted text block.
        ocr = responses['responses'][0]['textAnnotations'][0]
        origin_language = ocr['locale']
        image_analysis_reply += u'----- Start Original Text -----\n'
        image_analysis_reply += u'( Original Language 原文: ' + origin_language + ' )\n'
        image_analysis_reply += ocr['description'] + '----- End Original Text -----\n'
        # Translate only when the OCR language differs from the target.
        if origin_language != parm_translation_target_language:
            inputs = [ocr['description']]  # OCR results only
            outputs = service.translations().list(
                source=origin_language,
                target=parm_translation_target_language, q=inputs).execute()
            image_analysis_reply += u'\n----- Start Translation -----\n'
            image_analysis_reply += u'( Target Language 译文: ' + parm_translation_target_language + ' )\n'
            image_analysis_reply += outputs['translations'][0]['translatedText'] + '\n' + '----- End Translation -----\n'
            print('Completed: Translation API ...')
    else:
        image_analysis_reply += u'[ Nill 无结果 ]\n'
    return image_analysis_reply
# Running Vision API
# 'FACE_DETECTION'
def KudosData_FACE_DETECTION(image_base64, API_type, maxResults):
    """Run Vision API face detection on a base64-encoded image and return
    a formatted reply with the per-face emotion/attribute likelihoods."""
    vision = build('vision', 'v1', developerKey=APIKEY)
    body = {
        'requests': [{
            'image': {"content": image_base64},
            'features': [{'type': API_type, 'maxResults': maxResults}],
        }],
    }
    responses = vision.images().annotate(body=body).execute(num_retries=3)
    reply = u'\n[ ' + API_type + u' 面部表情 ]\n'
    annotation = responses['responses'][0]
    if annotation != {}:
        # One section per detected face, listing each likelihood rating.
        for idx, face in enumerate(annotation['faceAnnotations']):
            reply += u'----- No.' + str(idx + 1) + ' Face -----\n'
            for label, key in [(u'>>> Joy 喜悦: \n', u'joyLikelihood'),
                               (u'>>> Anger 愤怒: \n', u'angerLikelihood'),
                               (u'>>> Sorrow 悲伤: \n', u'sorrowLikelihood'),
                               (u'>>> Surprise 惊奇: \n', u'surpriseLikelihood'),
                               (u'>>> Headwear 头饰: \n', u'headwearLikelihood'),
                               (u'>>> Blurred 模糊: \n', u'blurredLikelihood'),
                               (u'>>> UnderExposed 欠曝光: \n', u'underExposedLikelihood')]:
                reply += label + face[key] + '\n'
    else:
        reply += u'[ Nill 无结果 ]\n'
    return reply
# Running Vision API
# 'SAFE_SEARCH_DETECTION'
def KudosData_SAFE_SEARCH_DETECTION(image_base64, API_type, maxResults):
    """Run Vision API safe-search (explicit content) detection on a
    base64-encoded image and return the four likelihood ratings."""
    vision = build('vision', 'v1', developerKey=APIKEY)
    body = {
        'requests': [{
            'image': {"content": image_base64},
            'features': [{'type': API_type, 'maxResults': maxResults}],
        }],
    }
    responses = vision.images().annotate(body=body).execute(num_retries=3)
    reply = u'\n[ ' + API_type + u' 不良内容 ]\n'
    annotation = responses['responses'][0]
    if annotation != {}:
        ratings = annotation['safeSearchAnnotation']
        for label, key in [(u'>>> Adult 成人: \n', u'adult'),
                           (u'>>> Violence 暴力: \n', u'violence'),
                           (u'>>> Spoof 欺骗: \n', u'spoof'),
                           (u'>>> Medical 医疗: \n', u'medical')]:
            reply += label + ratings[key] + '\n'
    else:
        reply += u'[ Nill 无结果 ]\n'
    return reply
itchat.auto_login(hotReload=True) # hotReload=True: 退出程序后暂存登陆状态。即使程序关闭,一定时间内重新开启也可以不用重新扫码。
# itchat.auto_login(enableCmdQR=-2) # enableCmdQR=-2: 命令行显示QR图片
# @itchat.msg_register([PICTURE], isGroupChat=True)
@itchat.msg_register([PICTURE])
def download_files(msg):
    """Handle an incoming WeChat picture: download it, run the six Vision
    analyses, and return the combined reply text to the sender."""
    parm_translation_origin_language = 'zh'  # will be overwritten by TEXT_DETECTION
    msg.download(msg.fileName)
    print('\nDownloaded image file name is: %s' % msg['FileName'])
    image_base64 = encode_image(msg['FileName'])
    # Run every analysis in turn and concatenate the formatted sections.
    analyses = [
        ('LABEL_DETECTION', KudosData_LABEL_DETECTION),
        ('LANDMARK_DETECTION', KudosData_LANDMARK_DETECTION),
        ('LOGO_DETECTION', KudosData_LOGO_DETECTION),
        ('TEXT_DETECTION', KudosData_TEXT_DETECTION),
        ('FACE_DETECTION', KudosData_FACE_DETECTION),
        ('SAFE_SEARCH_DETECTION', KudosData_SAFE_SEARCH_DETECTION),
    ]
    image_analysis_reply = u'[ Image Analysis 图像分析结果 ]\n'
    for api_type, analyse in analyses:
        image_analysis_reply += analyse(image_base64, api_type, parm_image_maxResults)
    print('Compeleted: Image Analysis API ...')
    return image_analysis_reply
itchat.run()
# interupt kernel, then logout
itchat.logout() # 安全退出
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 导入需要用到的一些功能程序库:
Step2: Using Google Cloud Platform's Machine Learning APIs
Step3: 图片二进制base64码转换 (Define image pre-processing functions)
Step4: 机器智能API接口控制参数 (Define control parameters for API)
Step5: * 识别图片消息中的物体名字 (Recognize objects in image)
Step6: * 识别图片消息中的物体名字 (Recognize objects in image)
Step7: * 识别图片消息中的物体名字 (Recognize objects in image)
Step8: * 识别图片消息中的文字 (OCR
Step9: * 识别人脸 (Recognize human face)
Step10: * 不良内容识别 (Explicit Content Detection)
Step11: 用微信App扫QR码图片来自动登录
|
7,192
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
sys.path.append(os.environ["SPARK_HOME"] + "/python/lib/py4j-0.9-src.zip")
sys.path.append(os.environ["SPARK_HOME"] + "/python/lib/pyspark.zip")
from pyspark import SparkConf, SparkContext
from pyspark import SparkFiles
from pyspark import StorageLevel
from pyspark import AccumulatorParam
# NOTE: Python 2 code (statement-form print, xrange) -- part of a PySpark
# 1.x/2.x tutorial notebook.
sconf = SparkConf()
sconf.setAppName("PySpark Tutorial")
sconf.setMaster("spark://snehasish-barmans-macbook.local:7077")
sc = SparkContext.getOrCreate(conf = sconf)
print sc
print sc.version
# RDDs can hold heterogeneous elements.
sc.parallelize([1,2, "abc", (1,2), {4,5,6}]).collect()
# Two small numeric RDDs, each split into 3 partitions.
rdd = sc.parallelize([1,2,3,4,5,6,7,8,9,10], 3)
rdd2 = sc.parallelize(xrange(10, 20), 3)
print rdd.glom().collect() # shows data grouped by partitions
print rdd2.glom().collect()
rdd.map(lambda x: x**2).collect() # map 1-to-1 transformation, operates on every element of rdd
# functions must be self-contained, no states or access global variables
def trans1(x):
    """Return the square of *x* (named equivalent of the mapping lambda)."""
    return x * x
rdd.map(trans1).collect()
rdd.filter(lambda x: x > 5).collect() # filter
# Local path to the text file used for the word-count examples below.
datasets = "../../../Machine_Learning/WPI_ML/datasets"
print os.path.exists(datasets)
#print os.path.realpath(datasets)
# default is hdfs filesystem, to access local files, use namespace -> file:///
textrdd = sc.textFile("file:///" + os.path.realpath(datasets) + "/Audio_Standardization_ Sentences.txt",
                     use_unicode = False, minPartitions = 3)
textrdd.glom().collect()
textrdd.flatMap(lambda x: x.split(" ")).take(11) # 1-to-many transformation, puts in a global list
def countWordsInPartition(iterator):
    """
    Count the words across one whole RDD partition (used with mapPartitions).

    @params:
        iterator: a partition of the rdd

    Yields a single total word count for the partition, so the resulting
    RDD has one element per partition.
    """
    count = 0
    for x in iterator:
        # Words are split on single spaces, matching the flatMap example above.
        count += len(x.split(" "))
    yield count
textrdd.mapPartitions(countWordsInPartition).collect() # same as map but operates on each chunk/partition of the rdd
rdd.sortBy(keyfunc = lambda x: x, ascending = False, numPartitions = 1).collect() # sorting
# numPartitions controls the level of parallelism
rdd.sample(withReplacement = False, fraction = 0.5, seed = 13).collect() # sampling
rdd.coalesce(1).glom().collect() # reduce no. of partitions by combining partitions from each worker, thereby minimizing network traffic
rdd.repartition(2).glom().collect()
# increases or decreases the no. of partitions, but at the cost of more network traffic,
# because Spark has to shuffle the data across the workers.
# Use coalesce when intent to decrease the partitions.
rdd.repartition(5).glom().collect()
rdd.union(rdd2).collect() # combines two rdds -> A u B
rdd.intersection(rdd2).collect() # intersection -> A n B
rdd.subtract(rdd2).collect() # subtract -> A - B, removes all the common elements between A and B from A and returns the rest
rdd.union(rdd2).distinct().sortBy(ascending = True, keyfunc = lambda x:x).collect() # distinct
rdd.cartesian(rdd2).take(5) # all pair combinations; creates key-value RDD
rdd.zip(rdd2).collect() # zip (same as zip() in python); creates key-value RDD
rdd.keyBy(lambda x: x % 3).collect() # keyBy (converts a normal RDD into key-value RDD based on a criteria)
# result of the criteria becomes the 'key' and the element itself becomes the 'value'.
print rdd.groupBy(lambda x: x % 3).collect() # groupBy - same as 'keyBy' but all the values of a key are grouped into an iterable
print list(rdd.groupBy(lambda x: x % 3).collect()[0][1])
file_name = "square_nums.py"
sc.addFile("./" + file_name) # All workers will download this file to their node
rdd.pipe("cat").collect() # pipe
# Use an external program for custom transformations.
# Reads data as string per partition from standard input and writes as string to standard output.
rdd.pipe(SparkFiles.get(file_name)).glom().collect() # pipe
rdd.reduce(lambda acc, x: acc+x) # reduce; operation must satisfy associative and communtative property
rdd.count() # count
rdd.take(4) # take (returns as a list; selects data from one partition, then moves to another partition as required to satisfy the limit)
rdd.takeSample(False, 5, seed = 13) # takeSample
rdd.takeOrdered(4, key = lambda x:x) # takeOrdered
rdd.collect()
rdd.first() # first
rdd.top(4, key = int) # top ; returns top n items in descending order
rdd.countApprox(1000, 0.5) # countApprox
rdd.countApproxDistinct(0.7) # number of distinct elements
def showValues(x):
    """Print one element; runs on the workers, so output lands in their stderr logs."""
    # Parenthesized single-argument print works under both Python 2 and 3.
    print("hello: " + str(x))
rdd.foreach(showValues) # foreach (Applies a function to every element of rdd)
# useful to communicate to external services, accumulate values in a queue, logging info, ...
# NOTE: verify results in stderr file of the working dir
def showValuesPartition(iterator):
    """Collect all elements of one partition and print them on a single line (for foreachPartition)."""
    vals = []
    for item in iterator:
        vals.append("hello: " + str(item))
    # Parenthesized single-argument print works under both Python 2 and 3.
    print(vals)
rdd.foreachPartition(showValuesPartition) # foreachPartition (Applies a function per partition of rdd)
rdd.max()
rdd.min()
rdd.stats()
rdd.sum()
rdd.mean()
rdd.stdev()
# must be an absolute path to directory name; default is hdfs namespace
# creates a part-xxxx file for each partition
rdd.saveAsTextFile("file:///" + os.path.realpath("./textfiles")) # saveAsTextFile
# using compression
# compresses part-xxxx file of each partition
rdd.saveAsTextFile("file:///" + os.path.realpath("./textfileszip"),
compressionCodecClass = "org.apache.hadoop.io.compress.GzipCodec") # saveAsTextFile
rdd.saveAsPickleFile("file:///" + os.path.realpath("./textfiles-pickled")) # saveAsPickleFile (faster reads, writes)
rdd.countByValue() # countByValue - returns as dict of value: count
rdd.isEmpty() # isEmpty
print rdd.getStorageLevel() # getStorageLevel
rdd.getNumPartitions() # getNumPartitions
rdd.persist(StorageLevel.DISK_ONLY)
print rdd.is_cached
print rdd.getStorageLevel()
rdd.unpersist()
print rdd.is_cached
print rdd.getStorageLevel()
krdd = sc.parallelize([("a", 1), ("a", 2), ("b", 1), ("b", 2), ("c", 1)], 2)
krdd2 = sc.parallelize([("a", 3), ("b", 3), ("d", 1)], 2)
print krdd.glom().collect()
print krdd2.glom().collect()
krdd.groupByKey().collect() # groupByKey
list(krdd.groupByKey().collect()[0][1])
krdd.reduceByKey(lambda acc, x: acc + x, numPartitions = 1).collect() # reduceByKey
# does a groupByKey, followed by reduction
# operation must obey associative and commutative properties
# numPartitions controls the level of parallelism
# http://www.learnbymarketing.com/618/pyspark-rdd-basics-examples/
# does a groupByKey, followed by custom reduce function that doesn't have to obey commutative and associative property
# define a resultset template (any data structure) with initial values
# Per-key accumulator template for aggregateByKey; index 0 holds the running sum.
init_state_template = [0]

def mergeValuesWithinPartition(template, val):
    """Fold one raw value into the per-key accumulator within a single partition."""
    template[0] = template[0] + val
    return template

def mergePartitions(template1, template2):
    """
    Combine two per-key accumulators coming from different partitions.

    Returns an accumulator of the same shape (a one-element list) so the
    result can itself be merged again when a key spans more than two
    partitions, and so the output format matches keys that live in a
    single partition.
    """
    # Bug fix: the original returned a bare int (template1[0] + template2[0]),
    # which breaks chained merges and is inconsistent with the list template.
    return [template1[0] + template2[0]]
krdd.aggregateByKey(init_state_template,
mergeValuesWithinPartition,
mergePartitions).collect() # aggregateByKey
krdd.sortByKey(ascending = False, numPartitions = 1, keyfunc = lambda x: x).collect() # sortByKey (can also use sortBy)
krdd.join(krdd2).collect() # join (inner-join in SQL; returns all-pair combinations)
krdd.leftOuterJoin(krdd2).collect() # leftOuterJoin (left join in SQL)
krdd.rightOuterJoin(krdd2).collect() # rightOuterJoin (right join in SQL)
krdd.fullOuterJoin(krdd2).collect() # fullOuterJoin (full join in SQL)
krdd.cogroup(krdd2).collect() # cogroup (returns iterator one for each rdd)
print list(krdd.cogroup(krdd2).collect()[0][1][0])
print list(krdd.cogroup(krdd2).collect()[0][1][1])
print list(krdd.cogroup(krdd2).collect()[2][1][0])
print list(krdd.cogroup(krdd2).collect()[2][1][1])
krdd.mapValues(lambda x: x**2).collect() # mapValues
krdd_val_iter = sc.parallelize([("a", [1,2,3]), ("b", [4,5,6])])
krdd_val_iter.flatMapValues(lambda x: [y**2 for y in x]).collect() # flatMapValues
# works in which value is an iterable object
# unpacks all elements in the iterable into their own key-value tuple/pair; puts them in a single list
krdd_val_iter.mapValues(lambda x: [y**2 for y in x]).collect() # mapValues 1-to-1
krdd.keys().collect() # keys
krdd.values().collect() # values
krdd.collect()
krdd.count()
krdd.take(3)
krdd_dup = sc.parallelize([("a", 1), ("a", 1)])
krdd_dup.distinct().collect()
krdd.countByKey() # countByKey - number of times a key appears in the k-v rdd
krdd.lookup("a") # lookup
krdd.toDebugString() # toDebugString (identifies recursive dependencies of this rdd for debugging purposes)
krdd.collectAsMap() # collectAsMap -> return key-value RDD as a dictionary
# default accumulator accumulates only numeric (int and float) types; only does 'add' operation (commutative and associative)
accum = sc.accumulator(0, accum_param = None)
def squareValues(x):
    """Square x while counting processed elements in the module-level `accum` accumulator."""
    global accum
    #accum += 1
    accum.add(1)  # side effect: one tick per element processed on the workers
    return x**2
print rdd.map(squareValues).collect()
print "No. of elements: %d" % accum.value
# custom accumulator to support any types
class CustomAccumulator(AccumulatorParam):
    """Accumulator that unions values into a set, to support non-numeric types."""

    def zero(self, initialValue):
        # Initial per-worker value: a set seeded with the initial value.
        # NOTE(review): the driver passes None as the initial value, so {None}
        # ends up in the final result — confirm this is intended.
        template = set()
        template.add(initialValue)
        return template

    def addInPlace(self, template1, template2):
        # NOTE(review): `accum += x` passes a raw element (e.g. an int) as
        # template2 here, and set.union(int) raises TypeError — this appears
        # to require both operands to be sets; verify against the driver code.
        return template1.union(template2)
accum = sc.accumulator(None, accum_param = CustomAccumulator())
def squareValues(x):
    """Square x while folding it into the module-level custom set accumulator `accum`."""
    global accum
    accum += x  # routed through CustomAccumulator.addInPlace on merge
    return x**2
print rdd.map(squareValues).collect()
print "No. of elements: %d" % accum.value
bb = sc.broadcast({"a": 10, "b": 15})
print bb.value
bb.unpersist() # deletes cached copies from the executors
bb.value
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Spark does lazy evaluation. If we have a chain of transformations, Spark won't execute them until an action is invoked.
Step3: Actions
Step4: Key-Value RDD Transformations
Step5: Key-Value RDD Actions
Step6: Performance
Step7: Broadcast variable
|
7,193
|
<ASSISTANT_TASK:>
Python Code:
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict
%matplotlib inline
np.random.seed(1)
y_hat = tf.constant(36, name='y_hat') # Define y_hat constant. Set to 36.
y = tf.constant(39, name='y') # Define y. Set to 39
loss = tf.Variable((y - y_hat)**2, name='loss') # Create a variable for the loss
init = tf.global_variables_initializer() # When init is run later (session.run(init)),
# the loss variable will be initialized and ready to be computed
with tf.Session() as session: # Create a session and print the output
session.run(init) # Initializes the variables
print(session.run(loss)) # Prints the loss
a = tf.constant(2)
b = tf.constant(10)
c = tf.multiply(a,b)
print(c)
sess = tf.Session()
print(sess.run(c))
# Change the value of x in the feed_dict
x = tf.placeholder(tf.int64, name = 'x')
print(sess.run(2 * x, feed_dict = {x: 3}))
sess.close()
# GRADED FUNCTION: linear_function
def linear_function():
    """
    Implements a linear function:
        Initializes W to be a random tensor of shape (4,3)
        Initializes X to be a random tensor of shape (3,1)
        Initializes b to be a random tensor of shape (4,1)

    Returns:
    result -- runs the session for Y = WX + b
    """
    np.random.seed(1)  # fixed seed so the graded output is reproducible

    X = tf.constant(np.random.randn(3, 1), name="X")
    W = tf.constant(np.random.randn(4, 3), name="W")
    b = tf.constant(np.random.randn(4, 1), name="b")
    Y = tf.add(tf.matmul(W, X), b)  # Y = WX + b

    # Constants need no initializer; just evaluate the output node.
    sess = tf.Session()
    result = sess.run(Y)
    sess.close()

    return result
print( "result = " + str(linear_function()))
# GRADED FUNCTION: sigmoid
def sigmoid(z):
    """
    Computes the sigmoid of z

    Arguments:
    z -- input value, scalar or vector

    Returns:
    results -- the sigmoid of z
    """
    # Placeholder lets the same graph be reused for any input fed at run time.
    x = tf.placeholder(tf.float32, name='x')
    sigmoid = tf.sigmoid(x)

    # The context manager closes the session automatically.
    with tf.Session() as sess:
        result = sess.run(sigmoid, feed_dict={x: z})

    return result
print ("sigmoid(0) = " + str(sigmoid(0)))
print ("sigmoid(12) = " + str(sigmoid(12)))
# GRADED FUNCTION: cost
def cost(logits, labels):
    """
    Computes the cost using the sigmoid cross entropy

    Arguments:
    logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)
    labels -- vector of labels y (1 or 0)

    Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels"
    in the TensorFlow documentation. So logits will feed into z, and labels into y.

    Returns:
    cost -- runs the session of the cost (formula (2))
    """
    # Placeholders for "logits" (z) and "labels" (y).
    z = tf.placeholder(tf.float32, name='z')
    y = tf.placeholder(tf.float32, name='y')  # fixed: was mistakenly named 'x'

    # sigmoid_cross_entropy_with_logits applies the sigmoid internally.
    cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)

    sess = tf.Session()
    cost = sess.run(cost, feed_dict={z: logits, y: labels})
    sess.close()

    return cost
logits = sigmoid(np.array([0.2,0.4,0.7,0.9]))
cost = cost(logits, np.array([0,0,1,1]))
print ("cost = " + str(cost))
# GRADED FUNCTION: one_hot_matrix
def one_hot_matrix(labels, C):
    """
    Creates a matrix where the i-th row corresponds to the ith class number and the jth column
    corresponds to the jth training example. So if example j had a label i. Then entry (i,j)
    will be 1.

    Arguments:
    labels -- vector containing the labels
    C -- number of classes, the depth of the one hot dimension

    Returns:
    one_hot -- one hot matrix
    """
    C = tf.constant(C, name="C")

    # axis=0 puts classes along rows and examples along columns.
    one_hot_matrix = tf.one_hot(labels, C, axis=0)

    sess = tf.Session()
    one_hot = sess.run(one_hot_matrix)
    sess.close()

    return one_hot
labels = np.array([1,2,3,0,2,1])
one_hot = one_hot_matrix(labels, C = 4)
print ("one_hot = " + str(one_hot))
# GRADED FUNCTION: ones
def ones(shape):
    """
    Creates an array of ones of dimension shape

    Arguments:
    shape -- shape of the array you want to create

    Returns:
    ones -- array containing only ones
    """
    ones = tf.ones(shape)

    sess = tf.Session()
    ones = sess.run(ones)
    sess.close()

    return ones
print ("ones = " + str(ones([3])))
# Loading the dataset
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Example of a picture
index = 0
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
# Flatten the training and test images
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T
# Normalize image vectors
X_train = X_train_flatten/255.
X_test = X_test_flatten/255.
# Convert training and test labels to one hot matrices
Y_train = convert_to_one_hot(Y_train_orig, 6)
Y_test = convert_to_one_hot(Y_test_orig, 6)
print ("number of training examples = " + str(X_train.shape[1]))
print ("number of test examples = " + str(X_test.shape[1]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
# GRADED FUNCTION: create_placeholders
def create_placeholders(n_x, n_y):
    """
    Creates the placeholders for the tensorflow session.

    Arguments:
    n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288)
    n_y -- scalar, number of classes (from 0 to 5, so -> 6)

    Returns:
    X -- placeholder for the data input, of shape [n_x, None] and dtype "float"
    Y -- placeholder for the input labels, of shape [n_y, None] and dtype "float"

    Tips:
    - None for the second dimension keeps the number of examples flexible,
      since it differs between training and test time.
    """
    X = tf.placeholder(tf.float32, shape=(n_x, None))
    Y = tf.placeholder(tf.float32, shape=(n_y, None))
    return X, Y
X, Y = create_placeholders(12288, 6)
print ("X = " + str(X))
print ("Y = " + str(Y))
# GRADED FUNCTION: initialize_parameters
def initialize_parameters():
    """
    Initializes parameters to build a neural network with tensorflow. The shapes are:
        W1 : [25, 12288]
        b1 : [25, 1]
        W2 : [12, 25]
        b2 : [12, 1]
        W3 : [6, 12]
        b3 : [6, 1]

    Returns:
    parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
    """
    tf.set_random_seed(1)  # so that your "random" numbers match ours

    # Xavier initialization for weights, zeros for biases.
    W1 = tf.get_variable("W1", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b1 = tf.get_variable("b1", [25, 1], initializer=tf.zeros_initializer())
    W2 = tf.get_variable("W2", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b2 = tf.get_variable("b2", [12, 1], initializer=tf.zeros_initializer())
    W3 = tf.get_variable("W3", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b3 = tf.get_variable("b3", [6, 1], initializer=tf.zeros_initializer())

    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2,
                  "W3": W3,
                  "b3": b3}

    return parameters
tf.reset_default_graph()
with tf.Session() as sess:
parameters = initialize_parameters()
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
    """
    Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
                  the shapes are given in initialize_parameters

    Returns:
    Z3 -- the output of the last LINEAR unit (softmax is applied inside the cost)
    """
    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']

    Z1 = tf.add(tf.matmul(W1, X), b1)   # Z1 = np.dot(W1, X) + b1
    A1 = tf.nn.relu(Z1)                 # A1 = relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)  # Z2 = np.dot(W2, A1) + b2
    A2 = tf.nn.relu(Z2)                 # A2 = relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)  # Z3 = np.dot(W3, A2) + b3

    return Z3
tf.reset_default_graph()
with tf.Session() as sess:
X, Y = create_placeholders(12288, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
print("Z3 = " + str(Z3))
# GRADED FUNCTION: compute_cost
def compute_cost(Z3, Y):
    """
    Computes the cost

    Arguments:
    Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)
    Y -- "true" labels vector placeholder, same shape as Z3

    Returns:
    cost - Tensor of the cost function
    """
    # tf.nn.softmax_cross_entropy_with_logits expects shape (num examples, num classes).
    logits = tf.transpose(Z3)
    labels = tf.transpose(Y)

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))

    return cost
tf.reset_default_graph()
with tf.Session() as sess:
X, Y = create_placeholders(12288, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
cost = compute_cost(Z3, Y)
print("cost = " + str(cost))
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,
          num_epochs = 1500, minibatch_size = 32, print_cost = True):
    """
    Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.

    Arguments:
    X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
    Y_train -- test set, of shape (output size = 6, number of training examples = 1080)
    X_test -- training set, of shape (input size = 12288, number of training examples = 120)
    Y_test -- test set, of shape (output size = 6, number of test examples = 120)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 100 epochs

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    ops.reset_default_graph()  # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)      # to keep consistent results
    seed = 3                   # to keep consistent results
    (n_x, m) = X_train.shape   # (n_x: input size, m: number of examples in the train set)
    n_y = Y_train.shape[0]     # n_y: output size
    costs = []                 # To keep track of the cost

    # Create Placeholders of shape (n_x, n_y)
    X, Y = create_placeholders(n_x, n_y)

    # Initialize parameters
    parameters = initialize_parameters()

    # Forward propagation: build the forward pass in the tensorflow graph
    Z3 = forward_propagation(X, parameters)

    # Cost function: add the cost to the tensorflow graph
    cost = compute_cost(Z3, Y)

    # Backpropagation: AdamOptimizer minimizing the cost
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    # Initialize all the variables
    init = tf.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:

        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):

            epoch_cost = 0.  # Defines a cost related to an epoch
            num_minibatches = int(m / minibatch_size)  # number of minibatches of size minibatch_size in the train set
            seed = seed + 1  # reshuffle differently each epoch, reproducibly
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:

                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch

                # IMPORTANT: the line that runs the graph on a minibatch.
                # Execute "optimizer" and "cost" with this minibatch fed to (X, Y).
                _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})

                epoch_cost += minibatch_cost / num_minibatches

            # Print the cost every epoch
            if print_cost == True and epoch % 100 == 0:
                print("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # lets save the parameters in a variable
        parameters = sess.run(parameters)
        print("Parameters have been trained!")

        # Calculate the correct predictions
        correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))

        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        print("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))

        return parameters
parameters = model(X_train, Y_train, X_test, Y_test)
import scipy
from PIL import Image
from scipy import ndimage
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "thumbs_up.jpg"
## END CODE HERE ##
# We preprocess your image to fit your algorithm.
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(64,64)).reshape((1, 64*64*3)).T
my_image_prediction = predict(my_image, parameters)
plt.imshow(image)
print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now that you have imported the library, we will walk you through its different applications. You will start with an example, where we compute for you the loss of one training example.
Step2: Writing and running programs in TensorFlow has the following steps
Step3: As expected, you will not see 20! You got a tensor saying that the result is a tensor that does not have the shape attribute, and is of type "int32". All you did was put in the 'computation graph', but you have not run this computation yet. In order to actually multiply the two numbers, you will have to create a session and run it.
Step4: Great! To summarize, remember to initialize your variables, create a session and run the operations inside the session.
Step6: When you first defined x you did not have to specify a value for it. A placeholder is simply a variable that you will assign data to only later, when running the session. We say that you feed data to these placeholders when running the session.
Step8: Expected Output
Step10: Expected Output
Step12: Expected Output
Step14: Expected Output
Step15: Expected Output
Step16: Change the index below and run the cell to visualize some examples in the dataset.
Step17: As usual you flatten the image dataset, then normalize it by dividing by 255. On top of that, you will convert each label to a one-hot vector as shown in Figure 1. Run the cell below to do so.
Step19: Note that 12288 comes from $64 \times 64 \times 3$. Each image is square, 64 by 64 pixels, and 3 is for the RGB colors. Please make sure all these shapes make sense to you before continuing.
Step21: Expected Output
Step23: Expected Output
Step25: Expected Output
Step27: Expected Output
Step28: Run the following cell to train your model! On our machine it takes about 5 minutes. Your "Cost after epoch 100" should be 1.016458. If it's not, don't waste time; interrupt the training by clicking on the square (⬛) in the upper bar of the notebook, and try to correct your code. If it is the correct cost, take a break and come back in 5 minutes!
Step29: Expected Output
|
7,194
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Sort the vocabulary so id assignment is deterministic across runs.
    vocab = sorted(set(text))
    vocab_to_int = {word: idx for idx, word in enumerate(vocab)}
    int_to_vocab = {idx: word for word, idx in vocab_to_int.items()}
    return vocab_to_int, int_to_vocab
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_create_lookup_tables(create_lookup_tables)
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Tokens are delimited with "||" so they cannot collide with real words,
    # and contain no spaces so each stays a single word after splitting.
    return {
        '.': '||Period||',
        ',': '||Comma||',
        '"': '||Quotation_Mark||',
        ';': '||Semicolon||',
        '!': '||Exclamation_Mark||',
        '?': '||Question_Mark||',
        '(': '||Left_Parentheses||',
        ')': '||Right_Parentheses||',
        '--': '||Dash||',
        '\n': '||Return||',
    }
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_tokenize(token_lookup)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # Named 'input' so it can be recovered later via get_tensor_by_name('input:0').
    inputs = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='targets')
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    return inputs, targets, learning_rate
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_inputs(get_inputs)
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    cell = tf.contrib.rnn.MultiRNNCell([lstm])
    # Name the zero state so generation code can fetch 'initial_state:0'.
    initial_state = tf.identity(cell.zero_state(batch_size, tf.float32), name='initial_state')
    return cell, initial_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Trainable embedding matrix, one row per vocabulary word.
    embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
    return tf.nn.embedding_lookup(embedding, input_data)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the final state so generation code can fetch 'final_state:0'.
    final_state = tf.identity(final_state, name='final_state')
    return outputs, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_rnn(build_rnn)
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    embed = get_embed(input_data, vocab_size, embed_dim)
    outputs, final_state = build_rnn(cell, embed)
    # Linear (no activation) projection to vocabulary logits;
    # the softmax is applied later by the loss and the 'probs' op.
    logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None)
    return logits, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array of shape (n_batches, 2, batch_size, seq_length)
    """
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch

    # Drop the tail that does not fill a whole batch.
    xdata = np.array(int_text[:n_batches * words_per_batch])
    # Targets are the inputs shifted one step left; the very last target
    # wraps around to the first word so x and y stay the same length.
    ydata = np.roll(xdata, -1)

    x_batches = np.split(xdata.reshape(batch_size, -1), n_batches, axis=1)
    y_batches = np.split(ydata.reshape(batch_size, -1), n_batches, axis=1)

    return np.array(list(zip(x_batches, y_batches)))
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_batches(get_batches)
# Number of Epochs
num_epochs = None
# Batch Size
batch_size = None
# RNN Size
rnn_size = None
# Embedding Dimension Size
embed_dim = None
# Sequence Length
seq_length = None
# Learning Rate
learning_rate = None
# Show stats for every n number of batches
show_every_n_batches = None
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
save_dir = './save'
DON'T MODIFY ANYTHING IN THIS CELL
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # TODO: Implement Function
    # NOTE(review): still a stub — presumably meant to look each tensor up via
    # loaded_graph.get_tensor_by_name(...); the exact tensor names depend on how
    # the training graph was saved — confirm against the training notebook.
    return None, None, None, None
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_tensors(get_tensors)
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # TODO: Implement Function
    # NOTE(review): still a stub — the caller (the generation loop below) expects
    # a word string selected from `probabilities` over the `int_to_vocab` ids.
    return None
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_pick_word(pick_word)
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TV Script Generation
Step3: Explore the Data
Step6: Implement Preprocessing Functions
Step9: Tokenize Punctuation
Step11: Preprocess all the data and save it
Step13: Check Point
Step15: Build the Neural Network
Step18: Input
Step21: Build RNN Cell and Initialize
Step24: Word Embedding
Step27: Build RNN
Step30: Build the Neural Network
Step33: Batches
Step35: Neural Network Training
Step37: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Implement Generate Functions
Step49: Choose Word
Step51: Generate TV Script
|
7,195
|
<ASSISTANT_TASK:>
Python Code:
node1 = tf.constant(3.0, tf.float32)
node2 = tf.constant(4.0) # also tf.float32 implicitly
print(node1, node2)
sess = tf.Session()
print(sess.run([node1, node2]))
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b # + provides a shortcut for tf.add(a, b)
adder_node
sess.run(adder_node, {a: [5], b:[3, 1]})
W = tf.Variable([.3], tf.float32, name='Weight')
b = tf.Variable([-.3], tf.float32, name="Bias")
x = tf.placeholder(tf.float32)
linear_model = W * x + b
init = tf.global_variables_initializer()
sess.run(init)
sess.run(linear_model, {x: 2})
y = tf.placeholder(tf.float32)
squared_deltas = tf.square(linear_model - y)
loss = tf.reduce_sum(squared_deltas)
print(sess.run([squared_deltas, loss], {x:[1,2,3,4], y:[0,-1,-2,-3]}))
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
for i in range(1000):
sess.run(train, {x:[1,2,3,4], y:[0,-1,-2,-3]})
print(sess.run([W, b]))
features = [tf.contrib.layers.real_valued_column("x", dimension=1)]
features
import numpy as np
import tensorflow as tf
# Declare list of features, we only have one real-valued feature
def model(features, labels, mode):
    """Custom model_fn for tf.contrib.learn.Estimator (TensorFlow 1.x).

    Fits a scalar linear regression y = W * x + b by gradient descent.
    :param features: dict of input tensors; only features['x'] is read here
    :param labels: target values tensor
    :param mode: estimator mode flag, passed through to ModelFnOps
    :return: tf.contrib.learn.ModelFnOps wiring predictions, loss and train op
    """
    # Build a linear model and predict values
    W = tf.get_variable("W", [1], dtype=tf.float64)
    b = tf.get_variable("b", [1], dtype=tf.float64)
    y = W*features['x'] + b
    # Loss sub-graph: sum of squared residuals
    loss = tf.reduce_sum(tf.square(y - labels))
    # Training sub-graph: one SGD step plus a manual global_step increment,
    # grouped so both run together each training iteration
    global_step = tf.train.get_global_step()
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train = tf.group(optimizer.minimize(loss),
                     tf.assign_add(global_step, 1))
    # ModelFnOps connects subgraphs we built to the
    # appropriate functionality.
    return tf.contrib.learn.ModelFnOps(
        mode=mode, predictions=y,
        loss=loss,
        train_op=train)
estimator = tf.contrib.learn.Estimator(model_fn=model)
# define our data set
x = np.array([1., 2., 3., 4.])
y = np.array([0., -1., -2., -3.])
input_fn = tf.contrib.learn.io.numpy_input_fn({"x": x}, y, 4, num_epochs=10)
# train
estimator.fit(input_fn=input_fn, steps=5)
# evaluate our model
print(estimator.evaluate(input_fn=input_fn, steps=1))
sess = tf.InteractiveSession()
a = input_fn()[0]
list(sess.run(a['x']))
with tf.variable_scope("foo", reuse=True):
a = tf.get_variable("v", [1])
sess.run(tf.global_variables_initializer())
tf.reset_default_graph()
with tf.variable_scope("bar"):
b = tf.get_variable("b", [5])
with tf.variable_scope("baz") as other_scope:
b = tf.get_variable("b", [5])
with tf.variable_scope("foo") as foo_scope:
assert foo_scope.name == "foo"
with tf.variable_scope("bar", reuse=True):
b = tf.get_variable("b", [5])
with tf.variable_scope("baz") as other_scope:
#assert other_scope.reuse == False, "reuse not false"
b = tf.get_variable("b", [5])
#sess.run(tf.global_variables_initializer())
dir(tf.get_default_graph())
g = tf.get_default_graph()
print(g.get_all_collection_keys())
print(g.get_collection("trainable_variables"))
sess = tf.InteractiveSession()
x = np.random.normal(2, 0.1, [5, 5])
centroids = x[0:2, :]
x_expanded = np.expand_dims(x, 1)
centroids_expanded = np.expand_dims(centroids, 0)
np.sum(np.square(np.subtract(x_expanded, centroids_expanded)), 2)
np.subtract(x_expanded, centroids_expanded), np.subtract(x_expanded, centroids_expanded).shape, x_expanded.shape, centroids_expanded.shape
centroids = x[0:2, :]
x_expanded = np.expand_dims(x, 0)
centroids_expanded = np.expand_dims(centroids, 1)
np.sum(np.square(np.subtract(x_expanded, centroids_expanded)), 2)
np.subtract(x_expanded, centroids_expanded), np.subtract(x_expanded, centroids_expanded).shape, x_expanded.shape, centroids_expanded.shape
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: New functionalities
|
7,196
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import IPython.html.widgets as widgets
import IPython.display as display
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import pandas as pd
pd.set_option('display.float_format', lambda x: '%.4f' % x)
pylab.rcParams['figure.figsize'] = 14, 8
pd.set_option('display.width', 400)
# load the CAPE data and clean it up a bit.
# source: http://www.econ.yale.edu/~shiller/data.htm
cape_data = pd.read_csv('CAPE data', skiprows=range(0,8), skip_blank_lines=True, \
names=['date','s&p comp price','s&p comp div','s&p comp earnings','CPI',\
'date fraction','int rate GS10','real price','real div','real earnings','CAPE','na'])
cape_data = cape_data[0:1737]
cape_data.sort('date',inplace=True)
cape_data.drop('na', axis=1,inplace=True)
cape_data.set_index('date')
cape_data[['s&p comp price', 'CPI', 'int rate GS10']] = cape_data[['s&p comp price', 'CPI','int rate GS10']].astype(float)
cape_data = cape_data.iloc[948:] # look at sample data since 1950
cape_data.head()
temp_df = cape_data.copy()
# create a column that holds the price in 2015 dollars
CPI2015 = temp_df.iloc[-1]['CPI']
temp_df['2015 price'] = temp_df['s&p comp price'] * ( CPI2015 / temp_df['CPI'] ) * .06
# ... and a column that holds the 1 year PE
temp_df['1PE'] = temp_df['s&p comp price'] / temp_df['s&p comp earnings']
# plot the price, CAPE, and 1Y PE
ax = temp_df.plot( x='date', y='2015 price', color='Black')
temp_df.plot( x='date',y='CAPE', color='Red', ax = ax )
temp_df.plot( x='date', y='1PE', color='Blue', ax=ax)
ax.legend(['S&P comp. price, 2015 dollars (scaled)','CAPE (10PE)','1YPE'], loc='upper left')
def split_periods( sell_threshold, buy_threshold, df, debug=False ):
    '''
    Given an input dataframe, return two lists of subset dataframes. In the first
    list are all the investable periods, and in the second list are the noninvestable
    periods. Investability is determined by CAPE ratio trigger values.

    Inputs:
        sell_threshold - the CAPE ratio point at which to end the investable period
        buy_threshold - the CAPE ratio at which to start the investable period
        df - the initial dataset with the CAPE ratio column
        debug - optional boolean for heavy debugging output

    Outputs:
        Returns two lists. The first is a list of subset dataframes that meet the
        investable period criteria, and the second is a list of subset dataframes
        that don't meet the investable period criteria.

    Side effect: adds/overwrites an 'is_invested' column on *df* in place.
    '''
    invested_state = True
    invested_periods = list()
    noninvested_periods = list()
    current_period = list()
    # add an 'is invested' column
    df['is_invested'] = None
    for index, row in df.iterrows():
        if row['CAPE'] > sell_threshold and invested_state:
            # cape too high, cashing out. add the previous invested period to the invested periods list
            invested_state = False
            if debug:
                # print() call form works under both Python 2 and Python 3
                print("sell threshold, appending " + str( df.loc[ df['date'].isin( current_period) ] ))
            invested_periods.append( df.loc[ df['date'].isin( current_period) ] )
            current_period = list()
        if row['CAPE'] < buy_threshold and not invested_state:
            # cape low enough to buy back in. add the previous noninvested period to the noninvested periods list
            invested_state = True
            if debug:
                print("buy threshold, appending " + str( df.loc[ df['date'].isin( current_period) ] ))
            noninvested_periods.append( df.loc[ df['date'].isin( current_period) ] )
            current_period = list()
        # set this row's 'is invested' state
        current_period.append( row['date'] )
        df.loc[index,'is_invested'] = invested_state
    # don't forget rows at the end of the dataset
    if invested_state:
        if debug:
            print("end of df: appending to invested: " + str( df.loc[ df['date'].isin( current_period) ] ))
        invested_periods.append( df.loc[ df['date'].isin( current_period) ] )
    else:
        if debug:
            print("end of df: appending to noninvested: " + str( df.loc[ df['date'].isin( current_period) ] ))
        noninvested_periods.append( df.loc[ df['date'].isin( current_period) ] )
    return (invested_periods,noninvested_periods)
def compute_gain_or_loss( df ):
    '''
    Return the price change from the first to the last row of *df*, plus the
    dividends collected over the period.  Empty frames yield 0.0; a single-row
    frame yields only that month's dividend.
    '''
    n_rows = len(df)
    if n_rows == 0:
        return 0.0
    # dividends are annualized; break them down to monthly amounts and total them
    monthly_div_total = (df['s&p comp div'] / 12).sum()
    if n_rows == 1:
        return monthly_div_total
    prices = df['s&p comp price']
    return prices.iloc[-1] - prices.iloc[0] + monthly_div_total
def compute_interest( df ):
    '''
    Approximate the short-term interest accrued over the rows (months) of *df*:
    the annualized GS10 rate is converted to a monthly figure, then scaled down
    by a factor of ten as a rough proxy for a short-term cash rate.
    '''
    monthly_rate = df['int rate GS10'] / 12  # annualized -> monthly GS10 rate
    short_term_rate = monthly_rate / 10      # crude short-term approximation
    return short_term_rate.sum()
def compute_passive_gains( df ):
    '''
    Compute the total buy-and-hold gain or loss for the dataframe: end price
    minus start price, plus all dividends collected over the period.
    '''
    # Previously wrapped the single frame in map()/sum(); a direct call to
    # compute_gain_or_loss is equivalent and clearer.
    return compute_gain_or_loss( df )
def compute_cape_strategy_gains( sell_threshold, buy_threshold, df, debug = False ):
    '''
    compute the total gains and losses for the dataframe, using the sell and buy thresholds
    to time the investable periods

    :param sell_threshold: CAPE level at which the strategy exits the market
    :param buy_threshold: CAPE level at which the strategy re-enters the market
    :param df: dataframe with 'CAPE', 'date', price/dividend and GS10 columns
    :param debug: when True, dump each period as it is classified

    Note: split_periods mutates *df* in place by adding an 'is_invested' column.
    '''
    # split the dataframe into invested and noninvested periods, using the thresholds
    (invested_periods,noninvested_periods) = split_periods(sell_threshold, buy_threshold, df, debug )
    if debug:
        # print() call form is valid under both Python 2 and Python 3
        print("\n\n===cape invested periods" + str(invested_periods) + "\n====")
        print("\n\n===cape noninvested periods " + str(noninvested_periods) + "\n====")
    # compute the gain or loss for each invested period
    gain_list = map( compute_gain_or_loss, invested_periods )
    # compute the interest accrued for each noninvested period
    int_list = map( compute_interest, noninvested_periods )
    return sum(gain_list) + sum(int_list)
sample_df = cape_data.copy()
results = []
# compute the buy and hold results
baseline_gains = compute_passive_gains( sample_df )
# compute the CAPE strategy results for various sell and buy thresholds
for s in range(20,50):
for b in range( 10, 30 ):
if b > s: continue
cape_gains = compute_cape_strategy_gains( s, b, sample_df )
results.append( [ baseline_gains - cape_gains, s, b ] )
results_df = pd.DataFrame( data=results, columns=['baseline - cape return', 'sell threshold', 'buy threshold'])
baseline_df = results_df[ results_df['baseline - cape return'] > 0 ]
ax = baseline_df.plot(kind='scatter', x='sell threshold',y='buy threshold', \
color='Blue', s = baseline_df['baseline - cape return'] * .05)
cape_df = results_df[ results_df['baseline - cape return'] <= 0 ]
cape_df.plot(kind='scatter', x='sell threshold',y='buy threshold', \
color='Red', s = cape_df['baseline - cape return'] * -.05 , ax = ax )
# import and options
%matplotlib inline
import IPython.html.widgets as widgets
import IPython.display as display
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import pandas as pd
pd.set_option('display.float_format', lambda x: '%.4f' % x)
pylab.rcParams['figure.figsize'] = 14, 8
pd.set_option('display.width', 400)
# read in test file for tests 1 and 2
test_data = pd.read_csv('testdata.csv', skiprows=range(0,8), skip_blank_lines=True, \
names=['date','s&p comp price','s&p comp div','s&p comp earnings','CPI',\
'date fraction','int rate GS10','real price','real div','real earnings','CAPE','na'])
test_data.drop('na', axis=1,inplace=True)
test_data.sort('date', inplace=True)
test_data.set_index('date')
#############################
#
# test 1
#
#############################
print "\n\ntest 1: baseline and cape strategies are the same"
sample_df = test_data.copy()
results = []
baseline_gains = compute_passive_gains( sample_df )
print "baseline gains " + str(baseline_gains)
assert baseline_gains == 1012.0
print "doing cape computation now"
sell_threshold = 25.0
buy_threshold = 20.0
cape_gains = compute_cape_strategy_gains( sell_threshold, buy_threshold, sample_df )
print "cape_gains " + str(cape_gains)
assert cape_gains == 1012.0
results.append( [ baseline_gains - cape_gains, sell_threshold, buy_threshold ] )
results_df = pd.DataFrame( data=results, columns=['baseline - cape return', 'sell threshold', 'buy threshold'])
assert len(results_df) == 1
assert results_df.iloc[0]['baseline - cape return'] == 0.0
assert results_df.iloc[0]['sell threshold'] == sell_threshold
assert results_df.iloc[0]['buy threshold'] == buy_threshold
print "test 1 passed"
#############################
#
# test 2
#
#############################
print "\n\ntest 2: cape strategy always in cash"
sample_df = test_data.copy()
results = []
baseline_gains = compute_passive_gains( sample_df )
print "baseline gains " + str(baseline_gains)
assert baseline_gains == 1012.0
print "doing cape computation now"
sell_threshold = 15.0
buy_threshold = 10.0
cape_gains = compute_cape_strategy_gains( sell_threshold, buy_threshold, sample_df )
print "cape_gains " + str(cape_gains)
assert cape_gains == 0.6
results.append( [ baseline_gains - cape_gains, sell_threshold, buy_threshold ] )
results_df = pd.DataFrame( data=results, columns=['baseline - cape return', 'sell threshold', 'buy threshold'])
assert len(results_df) == 1
assert results_df.iloc[0]['baseline - cape return'] == 1011.4
assert results_df.iloc[0]['sell threshold'] == sell_threshold
assert results_df.iloc[0]['buy threshold'] == buy_threshold
print "test 2 passed"
# read in test file for test 3
test_data = pd.read_csv('testdata2.csv', skiprows=range(0,8), skip_blank_lines=True, \
names=['date','s&p comp price','s&p comp div','s&p comp earnings','CPI',\
'date fraction','int rate GS10','real price','real div','real earnings','CAPE','na'])
test_data.drop('na', axis=1,inplace=True)
test_data.sort('date', inplace=True)
test_data.set_index('date')
#############################
#
# test 3
#
#############################
print "\n\ntest 3: cape strategy sometimes in cash"
sample_df = test_data.copy()
results = []
baseline_gains = compute_passive_gains( sample_df )
print "baseline gains " + str(baseline_gains)
assert baseline_gains == 12.0
print "doing cape computation now"
sell_threshold = 22.0
buy_threshold = 21.0
cape_gains = compute_cape_strategy_gains( sell_threshold, buy_threshold, sample_df, debug = True )
print "cape_gains " + str(cape_gains)
assert cape_gains == 1008.20
results.append( [ baseline_gains - cape_gains, sell_threshold, buy_threshold ] )
results_df = pd.DataFrame( data=results, columns=['baseline - cape return', 'sell threshold', 'buy threshold'])
assert len(results_df) == 1
assert results_df.iloc[0]['baseline - cape return'] == -996.2
assert results_df.iloc[0]['sell threshold'] == sell_threshold
assert results_df.iloc[0]['buy threshold'] == buy_threshold
print "test 3 passed"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's see how the dataset is structured.
Step2: Now let's plot the price, CAPE, and 1YPE data. We'll normalize the price data, adjusting for 2015 dollars and scaling it to make it readable next to the earnings ratios.
Step4: OK, time to write some code to test our sell- and buy-trigger hypothesis.
Step5: For each investable period, we'll need to figure out the price gain or loss over that period. We'll be collecting dividends, so we'll add that into the total.
Step6: For each noninvestable period, we'll be parking our money in a safe short-term interest bearing account. We'll write a function to calculate that amount.
Step7: Compute_passive_gains() calculates the gains for the baseline buy-and-hold case. It returns the change in price plus the dividends over the entire time period.
Step8: Do the calculations
Step9: Plot the results
Step10: What this tells me is that you can use the CAPE ratio to beat buy-and-hold -- but only when it's very, very high. Which isn't terribly often
|
7,197
|
<ASSISTANT_TASK:>
Python Code:
# Necessary imports
import os
import time
from nbminer.notebook_miner import NotebookMiner
from nbminer.cells.cells import Cell
from nbminer.features.features import Features
from nbminer.stats.summary import Summary
from nbminer.stats.multiple_summary import MultipleSummary
from nbminer.encoders.ast_graph.ast_graph import *
# Loading in the two corpuses
notebooks = [os.path.join('../hw_corpus', fname) for fname in os.listdir('../hw_corpus')]
hw_notebook_objs = [NotebookMiner(file) for file in notebooks]
people = os.listdir('../testbed/Final')
notebooks = []
for person in people:
person = os.path.join('../testbed/Final', person)
if os.path.isdir(person):
direc = os.listdir(person)
notebooks.extend([os.path.join(person, filename) for filename in direc if filename.endswith('.ipynb')])
notebook_objs = [NotebookMiner(file) for file in notebooks]
from nbminer.stats.multiple_summary import MultipleSummary
hw_summary = MultipleSummary(hw_notebook_objs)
final_summary = MultipleSummary(notebook_objs)
print("Number of Final notebooks: ", len(final_summary.summary_vec))
print("Number of Homework notebooks: ", len(hw_summary.summary_vec))
print("Average number of cells, Final: ", final_summary.average_number_of_cells())
print("Average number of cells, Homework: ", hw_summary.average_number_of_cells())
print("Average lines of code, Final: ", final_summary.average_lines_of_code())
print("Average lines of code, Homework: ", hw_summary.average_lines_of_code())
from nbminer.pipeline.pipeline import Pipeline
from nbminer.features.features import Features
from nbminer.preprocess.get_ast_features import GetASTFeatures
from nbminer.preprocess.get_imports import GetImports
from nbminer.preprocess.resample_by_node import ResampleByNode
from nbminer.encoders.ast_graph.ast_graph import ASTGraphReducer
from nbminer.preprocess.feature_encoding import FeatureEncoding
from nbminer.encoders.cluster.kmeans_encoder import KmeansEncoder
from nbminer.results.reconstruction_error.astor_error import AstorError
from nbminer.results.similarity.jaccard_similarity import NotebookJaccardSimilarity
a = Features(hw_notebook_objs, 'group_1')
a.add_notebooks(notebook_objs, 'group_2')
gastf = GetASTFeatures()
rbn = ResampleByNode()
gi = GetImports()
fe = FeatureEncoding()
ke = KmeansEncoder(n_clusters = 100)
#agr = ASTGraphReducer(a, threshold=20, split_call=False)
njs = NotebookJaccardSimilarity()
pipe = Pipeline([gastf, rbn, gi, fe, ke, njs])
a = pipe.transform(a)
import numpy as np
intra, inter = njs.group_average_jaccard_similarity('group_1')
print('Mean within group: ', np.mean(np.array(intra)))
print('STD within group: ', np.std(np.array(intra)))
print('Mean outside group: ', np.mean(np.array(inter)))
print('STD outside group: ', np.std(np.array(inter)))
from nbminer.pipeline.pipeline import Pipeline
from nbminer.features.features import Features
from nbminer.preprocess.get_ast_features import GetASTFeatures
from nbminer.preprocess.get_imports import GetImports
from nbminer.preprocess.resample_by_node import ResampleByNode
from nbminer.encoders.ast_graph.ast_graph import ASTGraphReducer
from nbminer.preprocess.feature_encoding import FeatureEncoding
from nbminer.encoders.cluster.kmeans_encoder import KmeansEncoder
from nbminer.results.reconstruction_error.astor_error import AstorError
from nbminer.results.similarity.jaccard_similarity import NotebookJaccardSimilarity
a = Features(hw_notebook_objs, 'group_1')
a.add_notebooks(notebook_objs, 'group_2')
gastf = GetASTFeatures()
rbn = ResampleByNode()
gi = GetImports()
fe = FeatureEncoding()
ke = KmeansEncoder(n_clusters = 10)
#agr = ASTGraphReducer(a, threshold=20, split_call=False)
njs = NotebookJaccardSimilarity()
pipe = Pipeline([gastf, rbn, gi, fe, ke, njs])
a = pipe.transform(a)
from nbminer.pipeline.pipeline import Pipeline
from nbminer.features.features import Features
from nbminer.preprocess.get_ast_features import GetASTFeatures
from nbminer.preprocess.get_imports import GetImports
from nbminer.preprocess.resample_by_node import ResampleByNode
from nbminer.encoders.ast_graph.ast_graph import ASTGraphReducer
from nbminer.preprocess.feature_encoding import FeatureEncoding
from nbminer.encoders.cluster.kmeans_encoder import KmeansEncoder
from nbminer.results.similarity.jaccard_similarity import NotebookJaccardSimilarity
from nbminer.results.prediction.corpus_identifier import CorpusIdentifier
a = Features(hw_notebook_objs, 'group_1')
a.add_notebooks(notebook_objs, 'group_2')
gastf = GetASTFeatures()
rbn = ResampleByNode()
gi = GetImports()
fe = FeatureEncoding()
ke = KmeansEncoder(n_clusters = 10)
#agr = ASTGraphReducer(a, threshold=20, split_call=False)
ci = CorpusIdentifier()
pipe = Pipeline([gastf, rbn, gi, fe, ke, ci])
a = pipe.transform(a)
%matplotlib inline
import matplotlib.pyplot as plt
fpr, tpr, m = ci.predict()
print(m)
plt.plot(fpr, tpr)
from nbminer.pipeline.pipeline import Pipeline
from nbminer.features.features import Features
from nbminer.preprocess.get_simple_features import GetSimpleFeatures
from nbminer.results.prediction.corpus_identifier import CorpusIdentifier
a = Features(hw_notebook_objs, 'group_1')
a.add_notebooks(notebook_objs, 'group_2')
gsf = GetSimpleFeatures()
ci = CorpusIdentifier(feature_name='string')
pipe = Pipeline([gsf, ci])
a = pipe.transform(a)
%matplotlib inline
import matplotlib.pyplot as plt
fpr, tpr, m = ci.predict()
print(m)
plt.plot(fpr, tpr)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Combined Clustering
Step2: Prediction of group
|
7,198
|
<ASSISTANT_TASK:>
Python Code:
import pandas
import numpy as np
import matplotlib.pyplot as plt
df_lit = pandas.read_csv("../Data/childrens_lit.csv.bz2", sep='\t', index_col=0, encoding = 'utf-8', compression='bz2')
#drop rows where the text is missing.
df_lit = df_lit.dropna(subset=['text'])
#view the dataframe
df_lit
####Adopted From:
#Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
n_samples = 2000
n_topics = 4
n_top_words = 50
##This is a function to print out the top words for each topic in a pretty way.
#Don't worry too much about understanding every line of this code.
def print_top_words(model, feature_names, n_top_words):
    """Pretty-print the n_top_words highest-weighted terms for every topic in *model*."""
    for idx, weights in enumerate(model.components_):
        # argsort ascending, then walk backwards to take the top-weighted indices
        top_indices = weights.argsort()[::-1][:n_top_words]
        terms = " ".join(feature_names[i] for i in top_indices)
        print("\nTopic #%d:" % idx)
        print(terms)
    print()
# Vectorize our text using CountVectorizer
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.80, min_df=50,
max_features=None,
stop_words='english'
)
tf = tf_vectorizer.fit_transform(df_lit.text)
print("Fitting LDA models with tf features, "
"n_samples=%d and n_topics=%d..."
% (n_samples, n_topics))
#define the lda function, with desired options
#Check the documentation, linked above, to look through the options
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=20,
learning_method='online',
learning_offset=80.,
total_samples=n_samples,
random_state=0)
#fit the model
lda.fit(tf)
#print the top words per topic, using the function defined above.
#Unlike R, which has a built-in function to print top words, we have to write our own for scikit-learn
#I think this demonstrates the different aims of the two packages: R is for social scientists, Python for computer scientists
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
####Exercise:
###Copy and paste the above code and fit a new model, lda_new, by changing some of the parameters. How does this change the output.
###Suggestions:
## 1. Change the number of topics.
## 2. Do not remove stop words.
## 3. Change other options, either in the vectorize stage or the LDA model
lda_new = LatentDirichletAllocation(n_topics=10, max_iter=20,
learning_method='online',
learning_offset=80.,
total_samples=n_samples,
random_state=0)
#fit the model
lda_new.fit(tf)
topic_dist = lda.transform(tf)
topic_dist
topic_dist_df = pandas.DataFrame(topic_dist)
df_w_topics = topic_dist_df.join(df_lit)
df_w_topics
print(df_w_topics[['title', 'author gender', 0]].sort_values(by=[0], ascending=False))
print(df_w_topics[['title', 'author gender', 1]].sort_values(by=[1], ascending=False))
#EX: What is the average topic weight by author gender, for each topic?
### Grapth these results
#Hint: You can use the python 'range' function and a for-loop
grouped_mean=df_w_topics.groupby('author gender').mean()
grouped_mean[[0,1,2,3]].plot(kind='bar')
plt.show()
#first create word count column
df_w_topics['word_count'] = df_w_topics['text'].apply(lambda x: len(str(x).split()))
df_w_topics['word_count']
#multiple topic weight by word count
df_w_topics['0_wc'] = df_w_topics[0] * df_w_topics['word_count']
df_w_topics['0_wc']
#create a for loop to do this for every topic
topic_columns = range(0, n_topics)
col_list = []
for num in topic_columns:
col = "%d_wc" % num
col_list.append(col)
#Solution
df_w_topics[col] = df_w_topics[num] * df_w_topics['word_count']
df_w_topics
#EX: What is the total number of words aligned with each topic, by author gender?
###Solution
grouped = df_w_topics.groupby("author gender")
grouped.sum()
#EX: What is the proportion of total words aligned with each topic, by author gender?
wc_columns = ['0_wc', '1_wc', '2_wc', '3_wc']
for n in wc_columns:
print(n)
print(grouped[n].sum()/grouped['word_count'].sum())
###EX:
# Find the most prevalent topic in the corpus.
# Find the least prevalent topic in the corpus.
# Hint: How do we define prevalence? What are different ways of measuring this,
# and the benefits/drawbacks of each?
for e in col_list:
print(e)
print(df_w_topics[e].sum()/df_w_topics['word_count'].sum())
for e in topic_columns:
print(e)
print(df_w_topics[e].mean())
grouped_year = df_w_topics.groupby('year')
fig3 = plt.figure()
chrt = 0
for e in col_list:
chrt += 1
ax2 = fig3.add_subplot(2,3, chrt)
(grouped_year[e].sum()/grouped_year['word_count'].sum()).plot(kind='line', title=e)
fig3.tight_layout()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id='fit'></a>
Step2: <a id='dtd'></a>
Step3: Merge back in with the original dataframe.
Step4: Now we can sort the dataframe for the topic of interest, and view the top documents for the topics.
Step5: <a id='words'></a>
Step6: Question
Step7: <a id='time'></a>
|
7,199
|
<ASSISTANT_TASK:>
Python Code:
# Generate reference fixtures for Keras.js Conv3D tests.
# Each case builds a one-layer model, seeds its weights deterministically,
# runs a random input through it, prints the values, and records them in
# the module-level DATA dict for export.

def _export_conv3d_case(case_name, data_in_shape, conv, seed):
    """Run one Conv3D fixture case and record its input/weights/output.

    Parameters
    ----------
    case_name : str
        Key under which the fixture is stored in the module-level DATA dict.
    data_in_shape : tuple of int
        Shape of the (channels_last) input volume, without the batch axis.
    conv : keras.layers.Conv3D
        The configured convolution layer under test.
    seed : int
        Seed for reproducible random weights and input.
    """
    layer_0 = Input(shape=data_in_shape)
    layer_1 = conv(layer_0)
    model = Model(inputs=layer_0, outputs=layer_1)

    # Set weights to random values (seeded for reproducibility).
    # NOTE: the original fixtures re-seed before *each* weight tensor, so the
    # seed call stays inside the loop to reproduce them byte-for-byte.
    weights = []
    for w in model.get_weights():
        np.random.seed(seed)
        weights.append(2 * np.random.random(w.shape) - 1)
    model.set_weights(weights)

    print('W shape:', weights[0].shape)
    print('W:', format_decimal(weights[0].ravel().tolist()))
    if len(weights) > 1:  # bias tensor exists only when use_bias=True
        print('b shape:', weights[1].shape)
        print('b:', format_decimal(weights[1].ravel().tolist()))

    # The input is drawn from the RNG state left after weight generation,
    # matching the original per-case call ordering exactly.
    data_in = 2 * np.random.random(data_in_shape) - 1
    result = model.predict(np.array([data_in]))
    data_out_shape = result[0].shape
    data_in_formatted = format_decimal(data_in.ravel().tolist())
    data_out_formatted = format_decimal(result[0].ravel().tolist())
    print('')
    print('in shape:', data_in_shape)
    print('in:', data_in_formatted)
    print('out shape:', data_out_shape)
    print('out:', data_out_formatted)

    DATA[case_name] = {
        'input': {'data': data_in_formatted, 'shape': data_in_shape},
        'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape}
                    for w in weights],
        'expected': {'data': data_out_formatted, 'shape': data_out_shape}
    }


# (case index, input shape, Conv3D kwargs, RNG seed) — one entry per fixture.
# All cases use data_format='channels_last'; only these parameters vary.
_CONV3D_CASES = [
    (0, (5, 5, 5, 2), dict(filters=4, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                           padding='valid', dilation_rate=(1, 1, 1),
                           activation='linear', use_bias=True), 130),
    (1, (4, 4, 4, 2), dict(filters=2, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                           padding='valid', dilation_rate=(1, 1, 1),
                           activation='sigmoid', use_bias=False), 131),
    (2, (4, 4, 3, 2), dict(filters=2, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                           padding='same', dilation_rate=(1, 1, 1),
                           activation='relu', use_bias=True), 132),
    (3, (4, 4, 3, 2), dict(filters=2, kernel_size=(3, 3, 2), strides=(2, 1, 1),
                           padding='same', dilation_rate=(1, 1, 1),
                           activation='relu', use_bias=True), 133),
    (4, (6, 6, 4, 2), dict(filters=2, kernel_size=(3, 3, 3), strides=(3, 3, 2),
                           padding='same', dilation_rate=(1, 1, 1),
                           activation='relu', use_bias=True), 134),
    (5, (6, 4, 4, 2), dict(filters=2, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                           padding='valid', dilation_rate=(2, 1, 1),
                           activation='relu', use_bias=True), 135),
    (6, (4, 4, 3, 2), dict(filters=2, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                           padding='same', dilation_rate=(1, 2, 1),
                           activation='relu', use_bias=True), 136),
]

for _idx, _in_shape, _kwargs, _seed in _CONV3D_CASES:
    _conv = Conv3D(data_format='channels_last', **_kwargs)
    _export_conv3d_case('convolutional.Conv3D.%d' % _idx, _in_shape, _conv, _seed)

# Export all collected fixtures for the Keras.js test suite.
print(json.dumps(DATA))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: [convolutional.Conv3D.1] 2 3x3x3 filters on 4x4x4x2 input, strides=(1,1,1), padding='valid', data_format='channels_last', dilation_rate=(1,1,1), activation='sigmoid', use_bias=False
Step2: [convolutional.Conv3D.2] 2 3x3x3 filters on 4x4x3x2 input, strides=(1,1,1), padding='same', data_format='channels_last', dilation_rate=(1,1,1), activation='relu', use_bias=True
Step3: [convolutional.Conv3D.3] 2 3x3x2 filters on 4x4x3x2 input, strides=(2,1,1), padding='same', data_format='channels_last', dilation_rate=(1,1,1), activation='relu', use_bias=True
Step4: [convolutional.Conv3D.4] 2 3x3x3 filters on 6x6x4x2 input, strides=(3,3,2), padding='same', data_format='channels_last', dilation_rate=(1,1,1), activation='relu', use_bias=True
Step5: [convolutional.Conv3D.5] 2 3x3x3 filters on 6x4x4x2 input, strides=(1,1,1), padding='valid', data_format='channels_last', dilation_rate=(2,1,1), activation='relu', use_bias=True
Step6: [convolutional.Conv3D.6] 2 3x3x3 filters on 4x4x3x2 input, strides=(1,1,1), padding='same', data_format='channels_last', dilation_rate=(1,2,1), activation='relu', use_bias=True
Step7: export for Keras.js tests
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.