| text_prompt (stringlengths 168 to 30.3k) | code_prompt (stringlengths 67 to 124k) |
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Examine the sorting code
Step4: How were they sorted?
|
<ASSISTANT_TASK:>
Python Code:
%load data/numbers.txt
# %load code/MRSortByString.py
from mrjob.job import MRJob
class MRSortByString(MRJob):
def mapper(self, _, line):
l = line.split(' ')
print l
yield l[1], l[0]
def reducer(self, key, val):
yield key, [v for v in val][0]
if __name__ == '__main__':
MRSortByString.run()
%run code/MRSortByString.py data/numbers.txt
# %load code/MRSortByInt.py
from mrjob.job import MRJob
class MRSortByInt(MRJob):
def mapper(self, _, line):
l = line.strip('\n').split()
yield '%01d'%int(l[1]), l[0]
def reducer(self, key, val):
yield int(key), int(list(val)[0])
if __name__ == '__main__':
MRSortByInt.run()
%run code/MRSortByInt.py data/numbers.txt
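# A quick aside (not part of the original notebook): MapReduce shuffles keys
# as strings, so numeric keys sort lexicographically unless they are
# zero-padded to a fixed width first -- which is what the '%01d' formatting in
# MRSortByInt is attempting (a wider pad such as '%04d' would be needed for
# multi-digit keys).
print sorted(['1', '10', '2', '9'])      # lexicographic: ['1', '10', '2', '9']
print sorted(['01', '02', '09', '10'])   # zero-padded keys sort numerically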
%%writefile data/sortdata.txt
1 1
2 4
3 8
4 2
4 7
5 5
6 10
7 11
# Running code inline example
# -*- coding: utf-8 -*-
# Testing word frequency count
import os, sys
sys.path.append(os.path.join(os.getcwd(),"code"))
from MRSortByString import *
from mrjob.job import MRJob
'''
This is a simple wrapper that runs mrjob MapReduce jobs, the inputs are:
MRJobClass - the class of the job to be run
argsArr - an array of strings to be used when creating the MRJob.
@author: Peter Harrington if you have any questions: peter.b.harrington@gmail.com
'''
def runJob(MRJobClass, argsArr, loc='local'):
if loc == 'emr':
argsArr.extend(['-r', 'emr'])
print "starting %s job on %s" % (MRJobClass.__name__, loc)
mrJob = MRJobClass(args=argsArr)
runner = mrJob.make_runner()
runner.run()
print "finished %s job" % MRJobClass.__name__
return mrJob, runner
def runParallelJob(MRJobClass, argsArr): # TODO: add threading to allow jobs to run in parallel
    pass
#launch a new thread
#call runJob(MRJobClass, argsArr) on the new thread
if __name__ == '__main__':
# pass in file from outside
# MRWordFrequencyCount.run()
#setup file here
mr_job, runner = runJob(MRSortByString,[os.path.join(os.path.join(os.getcwd(),"data"),"sortdata.txt")],"local")
print "Sorting sortdata.txt"
for line in runner.stream_output():
key, value = mr_job.parse_output_line(line)
print "%s: %s "%(key,value)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Tariff changes
Step2: Hourly consumption
Step3: Bill computed from hourly consumption
Step4: Splitting the billing across other intervals
Step5: Export to dict
Step6: Export to HTML
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from esiosdata import FacturaElec
from esiosdata.facturapvpc import (TIPO_PEAJE_GEN, TIPO_PEAJE_NOC, TIPO_PEAJE_VHC,
ZONA_IMPUESTOS_PENIN_BALEARES, ZONA_IMPUESTOS_CANARIAS)
from esiosdata.prettyprinting import *
t_0, t_f = '2016-11-01', '2017-01-05'
f1 = FacturaElec(t_0, t_f, tipo_peaje=TIPO_PEAJE_VHC, consumo=[219, 126, 154],
zona_impuestos=ZONA_IMPUESTOS_PENIN_BALEARES)
print_ok(f1)
f1.tipo_peaje = 2
print_info(f1)
f1.tipo_peaje = 'GEN'
print_cyan(f1)
consumo_horario = f1.consumo_horario
print_ok('Consumo horario [{}]: {} horas, TOTAL={:.2f} kWh\nHead:\n{}\nTail:{}'
.format(type(consumo_horario), len(consumo_horario), consumo_horario.sum(),
consumo_horario.head(), consumo_horario.tail()))
f2 = FacturaElec(consumo=consumo_horario)
f2
# Split into several bills:
import datetime as dt
consumo_total = f2.consumo_horario
t0, tf = consumo_total.index[0].date(), consumo_total.index[-1].date()
div_dias = f2.num_dias_factura // 3
# print(t0, tf, div_dias, f2.num_dias_factura)
c1 = consumo_total.loc[:t0 + dt.timedelta(days=div_dias)].iloc[:-1]
c2 = consumo_total.loc[t0 + dt.timedelta(days=div_dias): t0 + dt.timedelta(days=2*div_dias)].iloc[:-1]
c3 = consumo_total.loc[t0 + dt.timedelta(days=2*div_dias):]
assert c1.shape[0] + c2.shape[0] + c3.shape[0] == consumo_total.shape[0]
facturas = [FacturaElec(consumo=c) for c in [c1, c2, c3]]
print_ok(sum([f.coste_total for f in facturas]))
print_ok(f2.coste_total)
import json
d_factura = f2.to_dict()
print_ok(d_factura)
print_info(json.dumps(d_factura))
from ipywidgets import HTML, Layout
# Simple HTML, to embed in another web page
HTML(f2.to_html(), layout=Layout(width='75%'))
# Full web page:
HTML(f2.to_html(web_completa=True), layout=Layout(width='75%'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hendrickson = fRIP with 24 chromatin-related proteins
Step2: Convert coordinates from hg19 to hg38 with CrossMap and remap on the latest GENCODE human genes...
Step3: Remap on the human transcriptome with pyBedTools
Step4: List the uniq gene ids for each samples
Step5: Merge the replicates of the same experiment
Step6: Further dataset analysis
|
<ASSISTANT_TASK:>
Python Code:
# Standard lib imports
from glob import glob
from os import remove
# Local lib import
import pycl
import pyBioPlot as pbl
# Third party import
import pandas as pd
import numpy as np
import pylab as pl
import seaborn as sns
import scipy.stats as stats
# Pyplot tweaking
%matplotlib inline
pl.rcParams['figure.figsize'] = 30, 10 # that's default image size for this interactive session
# Larger display
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:75% !important; }</style>"))
# Allow to use R directly
%load_ext rpy2.ipython
# Simplify warning reporting to lighten the notebook style
import warnings
warnings.formatwarning=(lambda message, *args: "{}\n".format(message))
df = pd.read_csv("./Protein_interaction/Li_ClipSeq/Protocol_info/Selected_datasets.csv", sep="\t", index_col=0)
df
def liftover (input_file, output_file, unmap_file, liftover_chainfile):
# Conversion of coordinates with Crossmap/liftover
with open (output_file, "w") as output, open(unmap_file, "w") as unmap:
# Build the command line
cmd = "CrossMap.py bed {} {}".format(liftover_chainfile, input_file)
        # Write the lifted-over coordinates
for line in pycl.bash(cmd, ret_stdout=True).split("\n"):
ls = line.split("\t")
try:
output.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(ls[7], ls[8], ls[9], ls[10], ls[11], ls[12]))
except IndexError as E:
unmap.write(line+"\n")
# Test with one file
pycl.mkdir("./Protein_interaction/Li_ClipSeq/test")
liftover_chainfile = "../../LiftOver_chain_files/hg19ToHg38.over.chain.gz"
input_file = "./Protein_interaction/Li_ClipSeq/Human/HHCD1.bed"
output_file = "./Protein_interaction/Li_ClipSeq/test/HHCD1_converted.bed"
unmap_file = "./Protein_interaction/Li_ClipSeq/test/HHCD1_unmap.bed"
liftover(input_file, output_file, unmap_file, liftover_chainfile)
pycl.head(input_file, 3)
pycl.head(output_file, 3)
pycl.head(unmap_file, 3)
import pybedtools
def intersect_bed (annotation_file, peak_file, output_file):
# Parse file with pybedtools and intersect the coordinate ranges
peak = pybedtools.BedTool(peak_file)
annotation = pybedtools.BedTool(annotation_file)
intersection = peak.intersect(annotation, wo=True, s=True)
# Reformat the file generated by pybedtools with the merged intervals to a simple Bed format
init_template=["{chrom}","\t","{start}","\t","{end}","\t","{info}","\t","{score}","\t","{strand}","\t","{chrom2}","\t","{source}","\t","{feature}","\t","{start2}",
"\t","{end2}","\t","{score2}","\t","{strand2}","\t","{frame}","\tID=","{ID}",";gene_id=","{gene_id}",";gene_type=","{gene_type}",
";gene_status=","{gene_status}",";gene_name=","{gene_name}",";level=","{level}",";havana_gene=","{havana_gene}","\t","{len}"]
final_template=["{chrom}","\t","{start}","\t","{end}","\t","{gene_id}","|","{gene_name}","|","{gene_type}","|","{len}","\t","{score}","\t","{strand}"]
pycl.reformat_table(
input_file=intersection.fn, output_file=output_file, init_template=init_template, final_template=final_template,
replace_internal_space='_', replace_null_val="-", keep_original_header=False, predicate=lambda v: v["feature"] == "gene", header_from_final_template=True)
# Test with one file
annotation_file = "../../Reference_Annotation/gencode_v24.gff3"
peak_file = "./Protein_interaction/Li_ClipSeq/test/HHCD1_converted.bed"
output_file = "./Protein_interaction/Li_ClipSeq/test/HHCD1_annotated.bed"
intersect_bed (annotation_file, peak_file, output_file)
pycl.linerange(peak_file)
pycl.linerange(output_file)
def unique_genes_list (input_file_list, output_file):
gene_dict = {}
# iterate on replicates of the same experiment
for input_file in input_file_list:
df = pd.read_csv(input_file, sep="[\t|]", engine='python')
# iterate on lines of a replicate bed file
for gene_id, gene_df in df.groupby("gene_id"):
if gene_id not in gene_dict:
gene_dict[gene_id] = {
"gene_name":gene_df["gene_name"].unique()[0],
"gene_type":gene_df["gene_type"].unique()[0],
"all_count":0, "rep_count":0}
gene_dict[gene_id]["all_count"]+=len(gene_df)
gene_dict[gene_id]["rep_count"]+=1
# Write the valid gene entries in a file
with open (output_file, "w") as output:
output.write("gene_id\tgene_name\tgene_type\tall_count\trep_count\n")
for gene_id, val in gene_dict.items():
if val["rep_count"] == len(input_file_list):
output.write("{}\t{}\t{}\t{}\t{}\n".format(gene_id, val["gene_name"], val["gene_type"], val["all_count"], val["rep_count"]))
# Test with one file
input_file = ["./Protein_interaction/Li_ClipSeq/test/HCTA1_annotated.bed"]
output_file = "./Protein_interaction/Li_ClipSeq/test/HCTA1_uniq_gene.tsv"
unique_genes_list(input_file, output_file)
pycl.linerange(output_file)
# Test with a list of files
input_file = [ "./Protein_interaction/Li_ClipSeq/test/HCTA1_annotated.bed", "./Protein_interaction/Li_ClipSeq/test/HHCD1_annotated.bed"]
output_file = "./Protein_interaction/Li_ClipSeq/test/HHMF1-3_uniq_gene.tsv"
unique_genes_list(input_file, output_file)
pycl.linerange(output_file)
liftover_chainfile = "../../LiftOver_chain_files/hg19ToHg38.over.chain.gz"
annotation_file = "../../Reference_Annotation/gencode_v24.gff3"
intermediate_folder = "./Protein_interaction/Li_ClipSeq/selected_datasets/"
final_folder = "./Protein_interaction/Li_ClipSeq/final_datasets/"
pycl.mkdir(intermediate_folder)
pycl.mkdir(final_folder)
# df containing the sample list details
df = pd.read_csv("./Protein_interaction/Li_ClipSeq/Protocol_info/Selected_datasets.csv", sep="\t", index_col=0)
# dicts to count the files of the different datasets and experiments
dataset_dict = {}
exp_dict = {}
# Iterate over all samples of the same experiment (including replicates, if any)
for (prot,exp, cl, loc, aut), exp_df in df.groupby(["protein","experiment", "cell_line", "localization", "author"]):
exp_id = "{}_{}_{}_{}_{}".format(prot,exp, cl, loc, aut)
print("Analysis experiment ", exp_id)
intersect_file_list=[]
# Iterate on the replicates of the same experiment
for dataset_id, line in exp_df.iterrows():
print("\tAnalysis dataset {}".format(dataset_id))
input_file = line["filepath"]
print ("\t\tConverting coordinates with liftover")
converted_file = "{}{}_coord_hg38.bed".format(intermediate_folder, dataset_id)
unmap_file = "{}{}_unmap.bed".format(intermediate_folder, dataset_id)
liftover(input_file, converted_file, unmap_file, liftover_chainfile)
print ("\t\tIntersect with annotation file")
intersect_file = "{}{}_coord_hg38_annot_gencode24.bed".format(intermediate_folder, dataset_id)
intersect_file_list.append (intersect_file)
intersect_bed (annotation_file, converted_file, intersect_file)
# Fill the counter
dataset_dict[dataset_id] = {
"Init_sites":pycl.simplecount(input_file),
"Converted_coord_sites": pycl.simplecount(converted_file),
"Unmap_sites": pycl.simplecount(unmap_file),
"Annotated_sites": pycl.simplecount(intersect_file)}
    # List unique genes for each experiment; replicates are inner-joined first
print ("\tList uniq genes")
unique_gene_file = "{}{}_uniq_genes.tsv".format(final_folder, exp_id)
unique_genes_list (intersect_file_list, unique_gene_file)
exp_dict[exp_id] = {
"Uniq_genes": pycl.simplecount(unique_gene_file),
"Replicate_Number": len(intersect_file_list)}
pycl.dict_to_html(dataset_dict)
pycl.dict_to_html(exp_dict)
pycl.linerange("./Protein_interaction/Li_ClipSeq/final_datasets/AGO1_PAR-CLIP_HEK293_cytosol_hafner_uniq_genes.tsv")
input_folder = "./Protein_interaction/Li_ClipSeq/final_datasets/"
output_file = "./Protein_interaction/Li_ClipSeq/final_datasets/ALL_dataset_uniq_genes.tsv"
try:
    remove(output_file)  # remove the previous combined output, if any
except OSError:
    pass
# dict counting, for each gene, the datasets per localization category
gene_loc_dict = {}
for input_file in glob(input_folder+"*tsv"):
sample_name = pycl.file_basename(input_file)
print("Analysis dataset ", sample_name)
localization = sample_name.split("_")[3]
unique_gene_df = pd.read_csv(input_file, sep="\t", index_col=0)
for gene_id, gene_line in unique_gene_df.iterrows():
if gene_id not in gene_loc_dict:
gene_loc_dict[gene_id] = {"gene_name": gene_line["gene_name"] ,"gene_type":gene_line["gene_type"] ,"cytosol":0, "nucleus":0}
gene_loc_dict[gene_id][localization] += 1
# Write results in a file
with open (output_file, "w") as output:
output.write("gene_id\tgene_name\tgene_type\tcytosol_protein\tnuclear_protein\tnuclearness\n")
for gene_id, val in gene_loc_dict.items():
output.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(gene_id,val["gene_name"],val["gene_type"],val["cytosol"],val["nucleus"],val["nucleus"]-val["cytosol"]))
pycl.linerange(output_file)
# Create a distribution graph to identify the number of pure nuclear interacting or pure cytosol interacting RNAs
distrib_nucleus = []
distrib_cytosol = []
for gene_id, val in gene_loc_dict.items():
if not val["nucleus"]:
distrib_cytosol.append(val["cytosol"])
elif not val["cytosol"]:
distrib_nucleus.append(val["nucleus"])
distrib_nucleus.sort()
distrib_cytosol.sort()
p= pl.hist(distrib_nucleus,align="mid", histtype="stepfilled")
p= pl.hist(distrib_cytosol,align="mid", histtype="stepfilled")
# Distribution of the "nuclearness" score (nucleus count minus cytosol count) across all genes
distrib_nuclearness = sorted([val["nucleus"]-val["cytosol"] for val in gene_loc_dict.values()])
p= pl.hist(distrib_nuclearness)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problems
Step2: Problems
Step3: Problems
Step4: If we want to look at covariates, we need a new approach.
Step5: Once we've fit the data, we need to do something useful with it. Try to do the following things, listed as comments in the code below.
Step6: Model selection
|
<ASSISTANT_TASK:>
Python Code:
# imports used throughout this notebook
from datetime import datetime
import numpy as np
import pandas as pd
import pymc as pm
from numpy import log
running_id = 0
output = [[0]]
with open("E:/output.txt") as file_open:
for row in file_open.read().split("\n"):
cols = row.split(",")
if cols[0] == output[-1][0]:
output[-1].append(cols[1])
output[-1].append(True)
else:
output.append(cols)
output = output[1:]
for row in output:
if len(row) == 6:
row += [datetime(2016, 5, 3, 20, 36, 8, 92165), False]
output = output[1:-1]
def convert_to_days(dt):
day_diff = dt / np.timedelta64(1, 'D')
if day_diff == 0:
return 23.0
else:
return day_diff
df = pd.DataFrame(output, columns=["id", "advert_time", "male","age","search","brand","conversion_time","event"])
df["lifetime"] = pd.to_datetime(df["conversion_time"]) - pd.to_datetime(df["advert_time"])
df["lifetime"] = df["lifetime"].apply(convert_to_days)
df["male"] = df["male"].astype(int)
df["search"] = df["search"].astype(int)
df["brand"] = df["brand"].astype(int)
df["age"] = df["age"].astype(int)
df["event"] = df["event"].astype(int)
df = df.drop('advert_time', 1)
df = df.drop('conversion_time', 1)
df = df.set_index("id")
df = df.dropna(thresh=2)
df.median()
###Parametric Bayes
#Shout out to Cam Davidson-Pilon
## Example fully worked model using toy data
## Adapted from http://blog.yhat.com/posts/estimating-user-lifetimes-with-pymc.html
## Note that we've made some corrections
N = 2500
##Generate some random data
lifetime = pm.rweibull( 2, 5, size = N )
birth = pm.runiform(0, 10, N)
censor = ((birth + lifetime) >= 10)
lifetime_ = lifetime.copy()
lifetime_[censor] = 10 - birth[censor]
alpha = pm.Uniform('alpha', 0, 20)
beta = pm.Uniform('beta', 0, 20)
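# Log-likelihood for right-censored Weibull data: uncensored observations
# contribute the log-density, log f(t) = log(a/b) + (a-1) log(t/b) - (t/b)**a,
# while censored observations contribute only the log-survival term -(t/b)**a.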
@pm.observed
def survival(value=lifetime_, alpha = alpha, beta = beta ):
return sum( (1-censor)*(log( alpha/beta) + (alpha-1)*log(value/beta)) - (value/beta)**(alpha))
mcmc = pm.MCMC([alpha, beta, survival ] )
mcmc.sample(50000, 30000)
pm.Matplot.plot(mcmc)
mcmc.trace("alpha")[:]
#### Fit to your data here
#### Plot the distribution of the median
#### Adjust burn and thin, both paramters of the mcmc sample function
#### Narrow and broaden prior
#### Hypothesis testing
### Fit a cox proprtional hazards model
#### Plot baseline hazard function
#### Predict
#### Plot survival functions for different covariates
#### Plot some odds
#### BMA Coefficient values
#### Different priors
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.DataFrame({'col1': {0: 'a', 1: 'b', 2: 'c'},
'col2': {0: 1, 1: 3, 2: 5},
'col3': {0: 2, 1: 4, 2: 6},
'col4': {0: 3, 1: 6, 2: 2},
'col5': {0: 7, 1: 2, 2: 3},
'col6': {0: 2, 1: 9, 2: 5},
})
df.columns = [list('AAAAAA'), list('BBCCDD'), list('EFGHIJ')]
def g(df):
return pd.melt(df)
result = g(df.copy())
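# Note (assumption, not stated in the original snippet): with a three-level
# column MultiIndex and no id_vars, pd.melt unpivots every column; recent
# pandas names the unnamed levels variable_0, variable_1, variable_2 and adds
# a value column.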
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parameters
Step2: Model
Step3: Build model, solve, and calculate total discharge and distance to the 5 cm drawdown contour.
Step4: Plot an overview of the model
Step5: Visualizations
Step6: Model 2
Step7: Build model, solve, and calculate total discharge and distance to the 5 cm drawdown contour.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
# import sys
# sys.path.insert(1, "..")
import timml
kh = 2. # m/day
f_ani = 0.05 # anisotropy factor
kv = f_ani*kh
ctop = 800. # resistance top leaky layer in days
ztop = 0. # surface elevation
z_well = -13. # end depth of the wellscreen
z_dw = -15. # bottom elevation of sheetpile wall
z_extra = z_dw - 15. # extra layer
zbot = -60. # bottom elevation of the model
l = 40. # length building pit in m
b = 30. # width building pit in m
h_bem = -6.21 # m
offset = 5. # distance groundwater extraction element from sheetpiles in m
xy = [(-l/2, -b/2), (l/2, -b/2), (l/2, b/2), (-l/2, b/2), (-l/2, -b/2)]
for (x, y) in xy:
p2, = plt.plot(x, y, "o", label="building pit pts")
plt.axis("equal");
plt.show()
z = np.array([ztop+1, ztop, z_dw, z_dw, z_extra, z_extra, zbot])
dz = z[1::2] - z[2::2]
dz
kh_arr = kh * np.ones(dz.shape)
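# Vertical resistance between adjacent model layers: half the thickness of each
# neighbouring layer divided by the vertical hydraulic conductivity kv.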
c = np.r_[np.array([ctop]), dz[:-1]/(2*kv) + dz[1:]/(2*kv)]
c
ml = timml.ModelMaq(kaq=kh_arr, z=z, c=c, topboundary="semi", hstar=0.0, f2py=False)
layers = np.arange(np.sum(z_dw <= ml.aq.zaqbot))
last_lay_dw = layers[-1]
inhom = timml.BuildingPit(ml, xy, kaq=kh_arr, z=z[1:], topboundary="conf",
c=c[1:], order=4, ndeg=3, layers=layers)
timml.HeadLineSink(ml, x1=-l/2+offset, y1=b/2-offset, x2=l/2-offset, y2=b/2-offset, hls=h_bem,
layers=np.arange(last_lay_dw+1))
timml.HeadLineSink(ml, x1=-l/2+offset, y1=0, x2=l/2-offset, y2=0, hls=h_bem,
layers=np.arange(last_lay_dw+1))
timml.HeadLineSink(ml, x1=-l/2+offset, y1=-b/2+offset, x2=l/2-offset, y2=-b/2+offset, hls=h_bem,
layers=np.arange(last_lay_dw+1))
# ml.solve_mp(nproc=2)
ml.solve()
Qtot = 0.
for e in ml.elementlist:
if e.name == "HeadLineSink":
Qtot += e.discharge()
print("Debiet =", np.round(Qtot.sum(), 2), "m3/dag")
y = np.linspace(-b/2-25, b/2+1100, 201)
hl = ml.headalongline(np.zeros(201), y, layers=[0])
y_5cm = np.interp(-0.05, ml.headalongline(np.zeros(201), y, layers=0).squeeze(), y, right=np.nan)
print("Distance to 5 cm drawdown contour =", np.round(y_5cm, 2), "m")
# Q_arr[mi] = Qtot.sum()
# y5cm_arr[mi] = y_5cm
ml.plot()
x = np.linspace(-l/2-25, l/2+1100, 201)
hl = ml.headalongline(x, np.zeros(201), layers=[last_lay_dw, last_lay_dw+1])
fig, ax = plt.subplots(1, 1, figsize=(16, 5))
ax.plot(x, hl[0].squeeze(), label="head layer {}".format(last_lay_dw))
ax.plot(x, hl[1].squeeze(), label="head layer {}".format(last_lay_dw+1))
ax.axhline(-0.05, color="r", linestyle="dashed", lw=0.75, label="-0.05 m")
ax.axhline(-0.5, color="k", linestyle="dashed", lw=0.75, label="-0.5 m")
ax.set_xlabel("x (m)")
ax.set_ylabel("head (m)")
ax.legend(loc="best")
ax.grid(b=True)
xoffset = 15
zoffset = 15
x1, x2, y1, y2 = [-l/2-xoffset, -l/2+xoffset, 0, 0]
nudge = 1e-6
n = 301
# plot head contour cross-sections
h = ml.headalongline(np.linspace(x1 + nudge, x2 - nudge, n),
np.linspace(y1 + nudge, y2 - nudge, n))
L = np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
xg = np.linspace(0, L, n)+x1
zg = 0.5 * (ml.aq.zaqbot + ml.aq.zaqtop)
zg = np.hstack((ml.aq.zaqtop[0], zg, ml.aq.zaqbot[-1]))
h = np.vstack((h[0], h, h[-1]))
levels = np.linspace(h_bem, -0.0, 51)
fig, ax = plt.subplots(1, 1, figsize=(16, 8))
ml.plot(win=[x1, x2, y1, y2], orientation="ver", newfig=False)
# ml.vcontoursf1D(x1, x2, n, levels=101, newfig=False, ax=ax, color="r")
cf = ax.contourf(xg, zg, h, levels)
cs = ax.contour(xg, zg, h, levels, colors="k", linewidths=0.5)
# cs2 = ax.contour(xg, zg2, h2, levels, colors="r", linewidths=0.5, linestyles="dashed")
# plt.clabel(cs, fmt="%.2f")
# plt.clabel(cs2, fmt="%.2f")
ax.set_ylim(z_dw-zoffset, z_dw+zoffset)
ax.set_ylabel("diepte (m NAP)");
ax.set_xlabel("m")
ax.set_aspect("equal")
plt.colorbar(cf, ax=ax)
plt.show()
n = 11 # number of sublayers above and below the sheetpile wall
dz_i_top = (z_well-z_dw)/np.sum(np.arange(n+1))
dz_i_bot = (z_dw-z_extra)/np.sum(np.arange(2*n+1))
z_layers_top = np.cumsum(np.arange(n)*dz_i_top)
z_layers_bot = np.cumsum(np.arange(2*n)*dz_i_bot)
zgr = np.r_[z_dw + z_layers_top[::-1], (z_dw-z_layers_bot)[1:]]
z4 = np.r_[np.array([ztop+1, ztop, z_well, z_well]), np.repeat(zgr, 2, 0), z_extra, z_extra, zbot]
# z4 = np.r_[np.array([ztop+1, ztop, z_well]), np.repeat(zgr, 2, 0)[1:-1]]
dz4 = z4[1:-1:2] - z4[2::2]
kh_arr = kh * np.ones(dz4.shape)
c = np.r_[np.array([ctop]), dz4[:-1]/(2*kv) + dz4[1:]/(2*kv)]
kh_arr2 = kh_arr.copy()
kh_arr2[0] = 1e-5
ml = timml.ModelMaq(kaq=kh_arr, z=z4, c=c, topboundary="semi", hstar=0.0)
layers = np.arange(np.sum(z_dw <= ml.aq.zaqbot))
last_lay_dw = layers[-1]
inhom = timml.BuildingPit(ml, xy, kaq=kh_arr2, z=z4[1:], topboundary="conf",
c=c[1:], order=4, ndeg=3, layers=layers)
# wlayers = np.arange(np.sum(z_well <= ml.aq.zaqbot))
wlayers = np.arange(np.sum(-14 <= ml.aq.zaqbot))
wlayers=wlayers[1:]
timml.HeadLineSink(ml, x1=-l/2+offset, y1=b/2-offset, x2=l/2-offset, y2=b/2-offset, hls=h_bem,
layers=wlayers)
timml.HeadLineSink(ml, x1=-l/2+offset, y1=0, x2=l/2-offset, y2=0, hls=h_bem,
layers=wlayers, order=5)
timml.HeadLineSink(ml, x1=-l/2+offset, y1=-b/2+offset, x2=l/2-offset, y2=-b/2+offset, hls=h_bem,
layers=wlayers);
# ml.solve_mp(nproc=2)
ml.solve()
Qtot = 0.
for e in ml.elementlist:
if e.name == "HeadLineSink":
Qtot += e.discharge()
print("Debiet =", np.round(Qtot.sum(), 2), "m3/dag")
y = np.linspace(-b/2-25, b/2+1100, 201)
hl = ml.headalongline(np.zeros(201), y, layers=[0])
y_5cm = np.interp(-0.05, ml.headalongline(np.zeros(201), y, layers=0).squeeze(), y, right=np.nan)
print("Distance to 5 cm drawdown contour =", np.round(y_5cm, 2), "m")
last_lay_dw = layers[-1]
x = np.linspace(-l/2-25, l/2+1, 201)
# x = np.linspace(-l/2-25, l/2+1100, 201)
hl = ml.headalongline(x, np.zeros(201), layers=[0, last_lay_dw, last_lay_dw+1])
fig, ax = plt.subplots(1, 1, figsize=(16, 5))
ax.plot(x, hl[0].squeeze(), label="head layer 0")
ax.plot(x, hl[1].squeeze(), label="head layer {}".format(last_lay_dw))
ax.plot(x, hl[2].squeeze(), label="head layer {}".format(last_lay_dw+1))
ax.axhline(-0.05, color="r", linestyle="dashed", lw=0.75, label="-0.05 m")
ax.axhline(-0.5, color="k", linestyle="dashed", lw=0.75, label="-0.5 m")
ax.set_xlabel("x (m)")
ax.set_ylabel("head (m)")
ax.legend(loc="best")
ax.grid(b=True)
xoffset = 50
zoffset = 15
x1, x2, y1, y2 = [-l/2-xoffset, -l/2+xoffset, 0, 0]
nudge = 1e-6
n = 301
# plot head contour cross-sections
h = ml.headalongline(np.linspace(x1 + nudge, x2 - nudge, n),
np.linspace(y1 + nudge, y2 - nudge, n))
L = np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
xg = np.linspace(0, L, n) + x1
zg = 0.5 * (ml.aq.zaqbot + ml.aq.zaqtop)
zg = np.hstack((ml.aq.zaqtop[0], zg, ml.aq.zaqbot[-1]))
h = np.vstack((h[0], h, h[-1]))
levels = np.linspace(h_bem-.1, -0.0, 51)
fig, ax = plt.subplots(1, 1, figsize=(16, 8))
ml.plot(win=[x1, x2, y1, y2], orientation="ver", newfig=False)
# ml.vcontoursf1D(x1, x2, n, levels=101, newfig=False, ax=ax, color="r")
cf = ax.contourf(xg, zg, h, levels)
cs = ax.contour(xg, zg, h, levels, colors="k", linewidths=0.5)
# cs2 = ax.contour(xg, zg2, h2, levels, colors="r", linewidths=0.5, linestyles="dashed")
# plt.clabel(cs, fmt="%.2f")
# plt.clabel(cs2, fmt="%.2f")
ax.set_ylim(z_dw-zoffset, z_dw+zoffset)
ax.set_ylabel("depth (m NAP)");
ax.set_xlabel("m");
# ax.set_aspect("equal")
cb = plt.colorbar(cf, ax=ax)
cb.ax.set_ylabel("head (m)")
# ax.set_ylim(-20, -5)
# ax.set_xlim(-70, -57)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Below I'm plotting an example image from the MNIST dataset. These are 28x28 grayscale images of handwritten digits.
Step2: We'll train an autoencoder with these images by flattening them into 784-dimensional vectors. The images from this dataset are already normalized such that the values are between 0 and 1. Let's start by building the simplest possible autoencoder, with a single ReLU hidden layer. This layer serves as the compressed representation. The encoder is then the input layer plus the hidden layer, and the decoder is the hidden layer plus the output layer. Since the images are normalized between 0 and 1, we need to use a sigmoid activation on the output layer to get values matching the input.
Step3: Training
Step4: Here I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss and the test loss afterwards.
Step5: Checking out the results
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
# Size of the encoding layer (the hidden layer)
encoding_dim = 32
image_size = mnist.train.images.shape[1]
inputs_ = tf.placeholder(tf.float32, (None, image_size), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, image_size), name='targets')
# Output of hidden layer
encoded = tf.layers.dense(inputs_, encoding_dim, activation=tf.nn.relu)
# Output layer logits
logits = tf.layers.dense(encoded, image_size, activation=None)
# Sigmoid output from the logits
decoded = tf.nn.sigmoid(logits, name='output')
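# Note: the loss below takes the raw logits rather than `decoded`, because
# sigmoid_cross_entropy_with_logits applies the sigmoid internally in a
# numerically stable way; feeding already-sigmoided values would be wrong.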
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(0.001).minimize(cost)
# Create the session
sess = tf.Session()
epochs = 20
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
feed = {inputs_: batch[0], targets_: batch[0]}
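        # For an autoencoder the target is the input itself.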
batch_cost, _ = sess.run([cost, opt], feed_dict=feed)
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed, compressed = sess.run([decoded, encoded], feed_dict={inputs_: in_imgs})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
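# Optional extra check (not in the original notebook): the 32-dimensional
# compressed codes for the same ten test images, reshaped to 4x8 for display.
fig2, axes2 = plt.subplots(nrows=1, ncols=10, figsize=(20, 2))
for code, ax in zip(compressed, axes2):
    ax.imshow(code.reshape((4, 8)), cmap='Greys_r')
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)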
sess.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Assume that we have 4 assets, each with a return series of length 1000. We can use numpy.random.randn to sample returns from a normal distribution.
Step2: These return series can be used to create a wide range of portfolios, which all have different return and risk profiles.
Step3: Next, let's evaluate how these random portfolios would perform. Towards this goal we are calculating the mean returns as well as the volatility (here we are using standard deviation). You can also see that there is a recursive check in the code that rejects outlier portfolios (sigma > 2) to keep the plots readable.
Step4: In the code you will notice the calculation of the return with $\mu = w p^T$, where $w$ is the weight vector and $p$ the vector of mean returns, and of the volatility with $\sigma = \sqrt{w C w^T}$, where $C$ is the covariance matrix of the returns.
Step5: Upon plotting those you will observe that they form a characteristic parabolic shape, often called the Markowitz bullet.
Step6: Markowitz optimization and the Efficient Frontier
Step7: In yellow you can see the optimal portfolios for each of the desired returns (i.e. the mus). In addition, we get the one optimal portfolio returned by the optimizer.
Step8: Backtesting on real market data
Step9: Next, we'll create a zipline algorithm by defining two functions -- initialize() which is called once before the simulation starts, and handle_data() which is called for every trading bar. We then instantiate the algorithm object.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import cvxopt as opt
from cvxopt import blas, solvers
import pandas as pd
np.random.seed(123)
# Turn off progress printing
solvers.options['show_progress'] = False
## NUMBER OF ASSETS
n_assets = 4
## NUMBER OF OBSERVATIONS
n_obs = 1000
return_vec = np.random.randn(n_assets, n_obs)
plt.plot(return_vec.T, alpha=.4);
plt.xlabel('time')
plt.ylabel('returns')
def rand_weights(n):
''' Produces n random weights that sum to 1 '''
k = np.random.rand(n)
return k / sum(k)
print rand_weights(n_assets)
print rand_weights(n_assets)
def random_portfolio(returns):
'''
Returns the mean and standard deviation of returns for a random portfolio
'''
p = np.asmatrix(np.mean(returns, axis=1))
w = np.asmatrix(rand_weights(returns.shape[0]))
C = np.asmatrix(np.cov(returns))
mu = w * p.T
sigma = np.sqrt(w * C * w.T)
# This recursion reduces outliers to keep plots pretty
if sigma > 2:
return random_portfolio(returns)
return mu, sigma
n_portfolios = 500
means, stds = np.column_stack([
random_portfolio(return_vec)
for _ in xrange(n_portfolios)
])
plt.plot(stds, means, 'o', markersize=5)
plt.xlabel('std')
plt.ylabel('mean')
plt.title('Mean and standard deviation of returns of randomly generated portfolios')
def optimal_portfolio(returns):
n = len(returns)
returns = np.asmatrix(returns)
N = 100
mus = [10**(5.0 * t/N - 1.0) for t in range(N)]
# Convert to cvxopt matrices
S = opt.matrix(np.cov(returns))
pbar = opt.matrix(np.mean(returns, axis=1))
# Create constraint matrices
G = -opt.matrix(np.eye(n)) # negative n x n identity matrix
h = opt.matrix(0.0, (n ,1))
A = opt.matrix(1.0, (1, n))
b = opt.matrix(1.0)
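    # cvxopt.solvers.qp solves: minimize (1/2) x'Px + q'x  s.t.  Gx <= h, Ax = b.
    # Here P = mu*S (risk, scaled by the risk-aversion parameter mu), q = -pbar
    # (reward), G/h enforce w >= 0 (no short selling) and A/b enforce sum(w) == 1.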
# Calculate efficient frontier weights using quadratic programming
portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x']
for mu in mus]
## CALCULATE RISKS AND RETURNS FOR FRONTIER
returns = [blas.dot(pbar, x) for x in portfolios]
risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios]
## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE
m1 = np.polyfit(returns, risks, 2)
x1 = np.sqrt(m1[2] / m1[0])
# CALCULATE THE OPTIMAL PORTFOLIO
wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x']
return np.asarray(wt), returns, risks
weights, returns, risks = optimal_portfolio(return_vec)
plt.plot(stds, means, 'o')
plt.ylabel('mean')
plt.xlabel('std')
plt.plot(risks, returns, 'y-o')
print weights
from zipline.utils.factory import load_bars_from_yahoo
end = pd.Timestamp.utcnow()
start = end - 2500 * pd.tseries.offsets.BDay()
data = load_bars_from_yahoo(stocks=['IBM', 'GLD', 'XOM', 'AAPL',
'MSFT', 'TLT', 'SHY'],
start=start, end=end)
data.loc[:, :, 'price'].plot(figsize=(8,5))
plt.ylabel('price in $')
import zipline
from zipline.api import (add_history,
history,
set_slippage,
slippage,
set_commission,
commission,
order_target_percent)
from zipline import TradingAlgorithm
def initialize(context):
'''
Called once at the very beginning of a backtest (and live trading).
Use this method to set up any bookkeeping variables.
The context object is passed to all the other methods in your algorithm.
Parameters
context: An initialized and empty Python dictionary that has been
augmented so that properties can be accessed using dot
notation as well as the traditional bracket notation.
Returns None
'''
# Register history container to keep a window of the last 100 prices.
add_history(100, '1d', 'price')
# Turn off the slippage model
set_slippage(slippage.FixedSlippage(spread=0.0))
# Set the commission model (Interactive Brokers Commission)
set_commission(commission.PerShare(cost=0.01, min_trade_cost=1.0))
context.tick = 0
def handle_data(context, data):
'''
Called when a market event occurs for any of the algorithm's
securities.
Parameters
data: A dictionary keyed by security id containing the current
state of the securities in the algo's universe.
context: The same context object from the initialize function.
Stores the up to date portfolio as well as any state
variables defined.
Returns None
'''
# Allow history to accumulate 100 days of prices before trading
# and rebalance every day thereafter.
context.tick += 1
if context.tick < 100:
return
# Get rolling window of past prices and compute returns
prices = history(100, '1d', 'price').dropna()
returns = prices.pct_change().dropna()
try:
# Perform Markowitz-style portfolio optimization
weights, _, _ = optimal_portfolio(returns.T)
# Rebalance portfolio accordingly
for stock, weight in zip(prices.columns, weights):
order_target_percent(stock, weight)
except ValueError as e:
# Sometimes this error is thrown
# ValueError: Rank(A) < p or Rank([P; A; G]) < n
pass
# Instantiate algorithm
algo = TradingAlgorithm(initialize=initialize,
handle_data=handle_data)
# Run algorithm
results = algo.run(data)
results.portfolio_value.plot()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h3> Extract sample data from BigQuery </h3>
Step2: Let's increase the number of records so that we can do some neat graphs. There is no guarantee about the order in which records are returned, and so no guarantee about which records get returned if we simply increase the LIMIT. To properly sample the dataset, let's use the HASH of the pickup time and return 1 in 100,000 records -- because there are 1 billion records in the data, we should get back approximately 10,000 records if we do this.
Step3: <h3> Exploring data </h3>
Step4: Hmm ... do you see something wrong with the data that needs addressing?
Step5: What's up with the streaks around 45 dollars and 50 dollars? Those are fixed-amount rides from JFK and La Guardia airports into anywhere in Manhattan, i.e. to be expected. Let's list the data to make sure the values look reasonable.
Step6: Looking at a few samples above, it should be clear that the total amount reflects fare amount, toll and tip somewhat arbitrarily -- this is because when customers pay cash, the tip is not known. So, we'll use the sum of fare_amount + tolls_amount as what needs to be predicted. Tips are discretionary and do not have to be included in our fare estimation tool.
Step7: Hmm ... The min, max of longitude look strange.
Step8: As you'd expect, rides that involve a toll are longer than the typical ride.
Step9: The quality control has removed about 300 rows (11400 - 11101) or about 3% of the data. This seems reasonable.
Step10: Let's write out the three dataframes to appropriately named csv files. We can use these csv files for local training (recall that these files represent only 1/100,000 of the full dataset) just to verify our code works, before we run it on all the data.
Step11: <h3> Verify that datasets exist </h3>
Step12: We have 3 .csv files corresponding to train, valid, test. The ratio of file-sizes correspond to our split of the data.
Step13: Looks good! We now have our ML datasets and are ready to train ML models, validate them and evaluate them.
Step15: <h2>Benchmark on same dataset</h2>
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from google.cloud import bigquery
%%bigquery
SELECT
FORMAT_TIMESTAMP( "%Y-%m-%d %H:%M:%S %Z", pickup_datetime) AS pickup_datetime,
pickup_longitude,
pickup_latitude,
dropoff_longitude,
dropoff_latitude,
passenger_count,
trip_distance,
tolls_amount,
fare_amount,
total_amount
FROM
`nyc-tlc.yellow.trips`
LIMIT
10
%%bigquery trips
SELECT
FORMAT_TIMESTAMP( "%Y-%m-%d %H:%M:%S %Z", pickup_datetime) AS pickup_datetime,
pickup_longitude,
pickup_latitude,
dropoff_longitude,
dropoff_latitude,
passenger_count,
trip_distance,
tolls_amount,
fare_amount,
total_amount
FROM
`nyc-tlc.yellow.trips`
WHERE
ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1
print(len(trips))
# We can slice Pandas dataframes as if they were arrays
trips[:10]
ax = sns.regplot(
x="trip_distance",
y="fare_amount",
fit_reg=False,
ci=None,
truncate=True,
data=trips,
)
ax.figure.set_size_inches(10, 8)
%%bigquery trips
SELECT
FORMAT_TIMESTAMP( "%Y-%m-%d %H:%M:%S %Z", pickup_datetime) AS pickup_datetime,
pickup_longitude,
pickup_latitude,
dropoff_longitude,
dropoff_latitude,
passenger_count,
trip_distance,
tolls_amount,
fare_amount,
total_amount
FROM
`nyc-tlc.yellow.trips`
WHERE
ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1
AND trip_distance > 0
AND fare_amount >= 2.5
print(len(trips))
ax = sns.regplot(
x="trip_distance",
y="fare_amount",
fit_reg=False,
ci=None,
truncate=True,
data=trips,
)
ax.figure.set_size_inches(10, 8)
tollrides = trips[trips["tolls_amount"] > 0]
tollrides[tollrides["pickup_datetime"] == "2012-02-27 09:19:10 UTC"]
notollrides = trips[trips["tolls_amount"] == 0]
notollrides[notollrides["pickup_datetime"] == "2012-02-27 09:19:10 UTC"]
trips.describe()
def showrides(df, numlines):
lats = []
lons = []
for iter, row in df[:numlines].iterrows():
lons.append(row["pickup_longitude"])
lons.append(row["dropoff_longitude"])
lons.append(None)
lats.append(row["pickup_latitude"])
lats.append(row["dropoff_latitude"])
lats.append(None)
sns.set_style("darkgrid")
plt.figure(figsize=(10, 8))
plt.plot(lons, lats)
showrides(notollrides, 10)
showrides(tollrides, 10)
def preprocess(trips_in):
trips = trips_in.copy(deep=True)
trips.fare_amount = trips.fare_amount + trips.tolls_amount
del trips["tolls_amount"]
del trips["total_amount"]
del trips["trip_distance"] # we won't know this in advance!
qc = np.all(
[
trips["pickup_longitude"] > -78,
trips["pickup_longitude"] < -70,
trips["dropoff_longitude"] > -78,
trips["dropoff_longitude"] < -70,
trips["pickup_latitude"] > 37,
trips["pickup_latitude"] < 45,
trips["dropoff_latitude"] > 37,
trips["dropoff_latitude"] < 45,
trips["passenger_count"] > 0,
],
axis=0,
)
return trips[qc]
tripsqc = preprocess(trips)
tripsqc.describe()
shuffled = tripsqc.sample(frac=1)
trainsize = int(len(shuffled["fare_amount"]) * 0.70)
validsize = int(len(shuffled["fare_amount"]) * 0.15)
df_train = shuffled.iloc[:trainsize, :]
df_valid = shuffled.iloc[trainsize : (trainsize + validsize), :] # noqa: E203
df_test = shuffled.iloc[(trainsize + validsize) :, :] # noqa: E203
df_train.head(n=1)
df_train.describe()
df_valid.describe()
df_test.describe()
def to_csv(df, filename):
outdf = df.copy(deep=False)
outdf.loc[:, "key"] = np.arange(0, len(outdf)) # rownumber as key
# Reorder columns so that target is first column
cols = outdf.columns.tolist()
cols.remove("fare_amount")
cols.insert(0, "fare_amount")
print(cols) # new order of columns
outdf = outdf[cols]
outdf.to_csv(filename, header=False, index_label=False, index=False)
to_csv(df_train, "taxi-train.csv")
to_csv(df_valid, "taxi-valid.csv")
to_csv(df_test, "taxi-test.csv")
!head -10 taxi-valid.csv
!ls -l *.csv
%%bash
head taxi-train.csv
def distance_between(lat1, lon1, lat2, lon2):
# Haversine formula to compute distance "as the crow flies".
lat1_r = np.radians(lat1)
lat2_r = np.radians(lat2)
lon_diff_r = np.radians(lon2 - lon1)
sin_prod = np.sin(lat1_r) * np.sin(lat2_r)
cos_prod = np.cos(lat1_r) * np.cos(lat2_r) * np.cos(lon_diff_r)
minimum = np.minimum(1, sin_prod + cos_prod)
dist = np.degrees(np.arccos(minimum)) * 60 * 1.515 * 1.609344
return dist
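# Hypothetical sanity check (not in the original notebook): distance between
# two arbitrary NYC points, roughly midtown Manhattan and JFK airport.
print(distance_between(40.758, -73.985, 40.641, -73.778))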
def estimate_distance(df):
return distance_between(
df["pickuplat"], df["pickuplon"], df["dropofflat"], df["dropofflon"]
)
def compute_rmse(actual, predicted):
return np.sqrt(np.mean((actual - predicted) ** 2))
def print_rmse(df, rate, name):
print(
"{1} RMSE = {0}".format(
compute_rmse(df["fare_amount"], rate * estimate_distance(df)), name
)
)
FEATURES = ["pickuplon", "pickuplat", "dropofflon", "dropofflat", "passengers"]
TARGET = "fare_amount"
columns = list([TARGET])
columns.append("pickup_datetime")
columns.extend(FEATURES) # in CSV, target is first column, after the features
columns.append("key")
df_train = pd.read_csv("taxi-train.csv", header=None, names=columns)
df_valid = pd.read_csv("taxi-valid.csv", header=None, names=columns)
df_test = pd.read_csv("taxi-test.csv", header=None, names=columns)
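# Benchmark model: predict the fare as a constant rate per km of straight-line
# distance, with the rate chosen so that the mean predicted fare matches the
# mean actual fare on the training set.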
rate = df_train["fare_amount"].mean() / estimate_distance(df_train).mean()
print(f"Rate = ${rate}/km")
print_rmse(df_train, rate, "Train")
print_rmse(df_valid, rate, "Valid")
print_rmse(df_test, rate, "Test")
validation_query = """
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_datetime,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers,
"unused" AS key
FROM
`nyc-tlc.yellow.trips`
WHERE
ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 10000)) = 2
AND trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
  AND passenger_count > 0
"""
client = bigquery.Client()
df_valid = client.query(validation_query).to_dataframe()
print_rmse(df_valid, 2.59988, "Final Validation Set")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Machine learning on the iris dataset
Step2: Machine learning terminology
Step3: Each value we are predicting is the response (also known as the target, outcome, label, or dependent variable)
Step4: Resources
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import HTML
HTML('<iframe src=http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data width=300 height=200></iframe>')
# import load_iris function from datasets module
from sklearn.datasets import load_iris
# save "bunch" object containing iris dataset and its attributes
iris = load_iris()
type(iris)
# print the iris data
print iris.data
# print the names of the four features
print iris.feature_names
# print integers representing the species of each observation
print iris.target
# print the encoding scheme for species: 0 = setosa, 1 = versicolor, 2 = virginica
print iris.target_names
# check the types of the features and response
print type(iris.data)
print type(iris.target)
# check the shape of the features (first dimension = number of observations, second dimensions = number of features)
print iris.data.shape
# check the shape of the response (single dimension matching the number of observations)
print iris.target.shape
# store feature matrix in "X"
X = iris.data
# store response vector in "y"
y = iris.target
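# A minimal sketch (not part of the original notebook) of what X and y are
# used for downstream: fitting a classifier and predicting a new observation.
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X, y)
print knn.predict([[3, 5, 4, 2]])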
from IPython.core.display import HTML
def css_styling():
styles = open("styles/custom.css", "r").read()
return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parameters
Step2: Modified parameters
Step3: Time series, data collected at the end of each period
Step4: The origin of the partial_corr source is https
Step5: Structural info, data collected at the beginning of each period
Step6: levels of gray
Step7: Best solutions to produce a LaTeX table from these data (the example relates to the ts_df.corr table)
Step8: Data from each firm in each period
Step9: Managing parameter list
Step10: dictionary of values
Step11: dictionary of positions
Step13: the parameter checkResConsUnsoldProd (not affecting the model, but only working on its output) appears 20180829 in 28ter experiment; in the first commit, of 20180830, the name is checkResCons, but quite immediately became checkResConsUnsoldProd; the commit of 20181013 signals the we have the output from parameters.py (the experiment 80 is of 20181009, so without that output); all the experiments from 28ter to 80 have internally checkResConsUnsoldProd set to True
|
<ASSISTANT_TASK:>
Python Code:
#ts_df =ts_df [0:45]
#str_df=str_df[0:45]
par_df.astype(str,errors='ignore')
modPars_df.astype(str,errors='ignore')
if len(ts_df.columns) == 6:
ts_df.columns = \
['unempl.','totalProfit','totalProd.','plannedP.','price','wage']
# to have shorter names
if len(ts_df.columns) == 8:
ts_df.columns = \
['unempl.','totalProfit','totalProd.','plannedP.', 'cQ','hPSd','price','wage']
# to have shorter names
ts_df
ts_df.describe()
ts_df.corr(method="pearson").style.format("{:.2}")
wn.filterwarnings(action="ignore") # to eliminate a warning about
#LAPACK lib
np.set_printoptions(precision=2,suppress=True)
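# A constant column of ones is prepended so that partial_corr also controls
# for the intercept (the variables' means) -- an assumption based on the code
# pattern, since the partial_corr source is external.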
ts=ts_df.values
ts_int = np.hstack((np.ones((ts.shape[0],1)), ts))
out1=partial_corr.partial_corr(ts_int)[1:, 1:]
out1
ts=ts_df.drop(columns="plannedP.").values
ts_int = np.hstack((np.ones((ts.shape[0],1)), ts))
out2=partial_corr.partial_corr(ts_int)[1:, 1:]
out2
ts=ts_df.drop(columns="totalProd.").values
ts_int = np.hstack((np.ones((ts.shape[0],1)), ts))
out3=partial_corr.partial_corr(ts_int)[1:, 1:]
out3
ts2_df=ts_df
if len(ts_df.columns) == 6:
ts2_df.columns = \
['unempl','totalProfit','totalProd','plannedP','price','wage']
if len(ts_df.columns) == 8:
ts2_df.columns = \
['unempl','totalProfit','totalProd','plannedP','cQ','hPSd','price','wage']
result = sm.ols(formula="totalProfit ~ price + wage + totalProd + unempl", \
data=ts2_df).fit()
print (result.summary())
str_df
myPlot = ts_df.plot(figsize=(11,8),secondary_y=['hPriceSd', 'price','wage'],marker="*",
color=["OrangeRed","LawnGreen","Blue","Violet","lightblue","Pink","Gray","Brown"])
myPlot.set_ylabel('unemployed, totalProfit, totalProduction, plannedProduction, consumptionQ')
myPlot.right_ax.set_ylabel('hPriceSd, price, wage')
myPlot.legend(loc='upper left') #, bbox_to_anchor=(-0.35, 0.5)
myPlot.axes.right_ax.legend(loc='lower right') #, bbox_to_anchor=(1.1, 0.5)
myPlot = ts_df.plot(figsize=(11,8),secondary_y=['hPriceSd', 'price','wage'],marker="",
color=["lightgray","Black","Black","Black","Gray","lightgray","lightgray","lightgray"],
style=['-', '--', '-.', ':','-', '--', '-.'],
linewidth=1.)
myPlot.set_ylabel('unemployed, totalProfit, totalProduction, plannedProduction, consumptionQ')
myPlot.right_ax.set_ylabel('hPriceSd, price, wage')
myPlot.legend(loc='upper left') #, bbox_to_anchor=(-0.35, 0.5)
myPlot.axes.right_ax.legend(loc='lower right') #, bbox_to_anchor=(1.1, 0.5)
myPlot = ts_df.plot(figsize=(11,8),secondary_y=['hPriceSd', 'price','wage'],marker="",
color=["silver","Black","Black","Black","Gray","slategray","slategray","slategray"],
style=['-', '--', '-.', ':','-', '--', '-.'],
linewidth=2.)
myPlot.set_ylabel('unemployed, totalProfit, totalProduction, plannedProduction, consumptionQ')
myPlot.right_ax.set_ylabel('hPriceSd, price, wage')
myPlot.legend(loc='upper left') #, bbox_to_anchor=(-0.35, 0.5)
myPlot.axes.right_ax.legend(loc='lower right') #, bbox_to_anchor=(1.1, 0.5)
str_df.plot(figsize=(11,8),secondary_y='workers',marker="*",color=["r","b"])
str_df.plot(figsize=(11,8),secondary_y='workers',marker="*",color=["black",
"lightgrey"])
str_df.plot(figsize=(11,8),linewidth=2.0,secondary_y='workers',marker="*",color=["black",
"gray"])
corr=ts_df.corr(method='pearson')
def ff(x):
return '%1.2f' % x
if len(ts_df.columns) == 6:
print ("\\begin{table}[!htbp]\n{\\footnotesize \center")
if len(ts_df.columns) == 8:
print ("\\begin{table}[!htbp]\n{\\tiny \center")
print (corr.to_latex(formatters=[ff,ff,ff,ff,ff,ff,ff,ff]))
print("}\n\\caption{Correlations among the time series of the model,"+\
" with xxx}")
print("\\label{correlations xxx}\n\\end{table}")
ou=out1
if len(ts_df.columns) == 6:
names=['unempl.','totalProfit','totalProd.','plannedP.','price','wage']
if len(ts_df.columns) == 8:
names=['unempl.','totalProfit','totalProd.','plannedP.','cQ','hPSd','price','wage']
if len(ts_df.columns) == 6:
print ("\\begin{table}[!htbp]\n{\\footnotesize \center")
if len(ts_df.columns) == 8:
print ("\\begin{table}[!htbp]\n{\\tiny \center")
if len(ts_df.columns) == 6:
print ("\\begin{tabular}{lrrrrrr}\n\\toprule\n"+\
"{} & unempl. & totalProfit & totalProd. & plannedP. & price & wage \\\\"+\
"\n\\midrule")
if len(ts_df.columns) == 8:
print ("\\begin{tabular}{lrrrrrrrr}\n\\toprule\n"+\
"{} & unempl. & totalProfit & totalProd. & plannedP. & cQ & hPSd & price & wage \\\\"+\
"\n\\midrule")
for i in range(len(ou)):
print(names[i], end="")
for j in range(len(ou[i])):
print(" & %.2f" % ou[i,j], end="")
print(" \\\\")
print("\\bottomrule\n\\end{tabular}")
print("}\n\\caption{Partial correlations among the time series of the model,"+\
" with xxx}")
print("\\label{partial correlations xxx}\n\\end{table}")
ou=out2
if len(ts_df.columns) == 6:
names=['unempl.','totalProfit','totalProd.','price','wage']
if len(ts_df.columns) == 8:
names=['unempl.','totalProfit','totalProd.','cQ','hPSd','price','wage']
print ("\\begin{table}[!htbp]\n{\\footnotesize \center")
if len(ts_df.columns) == 6:
print ("\\begin{tabular}{lrrrrr}\n\\toprule\n"+\
"{} & unempl. & totalProfit & totalProd. & price & wage \\\\"+\
"\n\\midrule")
if len(ts_df.columns) == 8:
print ("\\begin{tabular}{lrrrrrrr}\n\\toprule\n"+\
"{} & unempl. & totalProfit & totalProd. & cQ & hPSd & price & wage \\\\"+\
"\n\\midrule")
for i in range(len(ou)):
print(names[i], end="")
for j in range(len(ou[i])):
print(" & %.2f" % ou[i,j], end="")
print(" \\\\")
print("\\bottomrule\n\\end{tabular}")
print("}\n\\caption{Partial correlations (no plannedProduction) among the time series of the model,"+\
" with xxx}")
print("\\label{partial correlations (no plannedP.) xxx}\n\\end{table}")
ou=out3
if len(ts_df.columns) == 6:
names=['unempl.','totalProfit','plannedP.','price','wage']
if len(ts_df.columns) == 8:
names=['unempl.','totalProfit','plannedP.','cQ','hPSd','price','wage']
print ("\\begin{table}[!htbp]\n{\\footnotesize \center")
if len(ts_df.columns) == 6:
print ("\\begin{tabular}{lrrrrr}\n\\toprule\n"+\
"{} & unempl. & totalProfit & plannedP. & price & wage \\\\"+\
"\n\\midrule")
if len(ts_df.columns) == 8:
print ("\\begin{tabular}{lrrrrrrr}\n\\toprule\n"+\
"{} & unempl. & totalProfit & plannedP. & cQ & hPSd & price & wage \\\\"+\
"\n\\midrule")
for i in range(len(ou)):
print(names[i], end="")
for j in range(len(ou[i])):
print(" & %.2f" % ou[i,j], end="")
print(" \\\\")
print("\\bottomrule\n\\end{tabular}")
print("}\n\\caption{Partial correlations (no totalProduction) among the time series of the model,"+\
" with xxx}")
print("\\label{partial correlations (no totalProd.) xxx}\n\\end{table}")
if firms: print(firms_df.describe())
else: print('no data for each firm in each period')
ctitle=""
if len(par_df.columns)==2: ctitle=par_df.columns[0]
if len(par_df.columns)==3: ctitle=par_df.columns[1]
if len(ts_df.columns) == 6:
parList=par_df[ctitle].tolist()
valList=par_df["Values"].tolist()
if len(ts_df.columns) == 8:
parList=par_df["Parameter internal names"].tolist()
valList=par_df["Values"].tolist()
# both parList are generated by the 'print' of parameters.py
d_val=dict(zip(parList,valList))
d_val
labelsPositions_df= pd.read_csv('labelsPositions.csv')
#labelsPositions_df
parList2=labelsPositions_df["name"].tolist()
posList=labelsPositions_df["position"].tolist()
d_pos=dict(zip(parList2,posList))
#d_pos
row=['-']*53 # 52 parameters; pos. 0 is unused
row[44]='51' # as default value for the par 'startHayekianMarket' for old
# SMAC versions where it was not defined
for _ in range(len(parList)):
if parList[_]=='w': row[d_pos['Q']]=d_val[parList[_]]
if parList[_] in d_pos: row[d_pos[parList[_]]]=d_val[parList[_]]
else: print('not found:',parList[_])
import platform
def creation_date(path_to_file):
    """Try to get the date that a file was created, falling back to when it was
    last modified if that isn't possible.
    See http://stackoverflow.com/a/39501288/1709587 for explanation.
    """
if platform.system() == 'Windows':
return os.path.getctime(path_to_file)
else: #MacOs
stat = os.stat(path_to_file)
try:
return stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
return stat.st_mtime
#converter https://www.unixtimestamp.com
fileTime=creation_date("./"+nameFilePar)
if fileTime >= 1535414400 and fileTime <= 1539129600:
row[8]='True'
#row
#for i in range(1,len(row)-1):
# print(row[i],"& ",end='')
#print(row[-1])
for i in range(1,26):
print(row[i],"& ",end='')
print(row[26])
for i in range(27,len(row)-1):
print(row[i],"& ",end='')
if '[' in row[-1]: row[-1]=row[-1][1:5] # [1:5] is to avoid the [ ] output
print(row[-1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's load up the NSFG data again.
Step2: And select live, full-term births.
Step3: And drop rows with missing data (just for the variables we want).
Step4: Check a few rows
Step5: And summarize a few variables.
Step6: Here's a scatterplot of age and birthweight, with parameters tuned to avoid saturation.
Step7: Mean of mother's age
Step8: Mean and standard deviation of birthweight
Step9: And the coefficient of correlation
Step10: The Pandas corr function gets the same result
Step11: To see the relationship more clearly, we can group mother's age into 3-year bins and plot percentiles of birth weight for each bin.
Step13: The first and last points are not very reliable, because they represent fewer data points.
Step14: There are 7643 respondents and 3087 variables about each.
Step15: If we use the caseid variable as the index, we can look up respondents efficiently by id.
Step16: Now we can join the tables, using the caseid from each pregnancy record to find the corresponding respondent and (abstractly) copy over the additional variables.
Step17: The encoding for screentime is a colon-separated timestamp.
Step18: If we convert to a datetime object, we avoid some processing problems later.
Step19: To estimate the effect of mother's age on birthweight, we can use a simple least squares fit.
Step20: The slope is almost 3 ounces per decade.
Step21: The results object contains the parameters (and all the other info in the table)
Step22: And the results are consistent with my implementation
Step23: We can use a boolean variable as a predictor
Step24: First babies are lighter by about 1.5 ounces.
Step25: And we can make a model with multiple predictors.
Step26: If we control for mother's age, the difference in weight for first babies is cut to about 0.5 ounces (and no longer statistically significant).
Step27: The relationship with age might be non-linear. Adding a quadratic term helps a little, although note that the $R^2$ values for all of these models are very small.
Step28: Now we can combine the quadratic age model with isfirst
Step29: Now the effect is cut to less than a third of an ounce, and very plausibly due to chance.
Step30: Here's the best model I found, combining all variables that seemed plausibly predictive.
Step31: All predictors are statistically significant, so the effects could be legit, but the $R^2$ value is still very small
Step32: The estimated parameter is 0.0016, which is small and not statistically significant. So the apparent relationship might be due to chance.
Step33: And we can use the odds ratio to update a prior probability. A mother at the mean age has a 51% chance of having a boy.
Step34: I searched for other factors that might be predictive. The most likely candidates turn out not to be statistically significant.
Step35: Again, taking these parameters at face value, we can use the model to make predictions.
Step36: results.predict uses the model to generate predictions for the data.
Step37: And we can use the model to generate a prediction for the office pool.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
import numpy as np
import pandas as pd
import first
import thinkstats2
import thinkplot
%matplotlib inline
live, firsts, others = first.MakeFrames()
live.shape
live = live[live.prglngth>=37]
live.shape
live = live.dropna(subset=['agepreg', 'totalwgt_lb'])
live.shape
live.head()
live[['agepreg', 'totalwgt_lb']].describe()
ages = live.agepreg
weights = live.totalwgt_lb
thinkplot.Scatter(ages, weights, alpha=0.1, s=15)
thinkplot.Config(xlabel='age (years)',
ylabel='weight (lbs)',
xlim=[10, 45],
ylim=[0, 15],
legend=False)
live['agepreg'].mean()
live['totalwgt_lb'].mean(), live['totalwgt_lb'].std()
thinkstats2.Corr(ages, weights)
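# Hedged cross-check (not in the source notebook): the same Pearson
# correlation straight from NumPy.
np.corrcoef(ages, weights)[0, 1]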
live['totalwgt_lb'].corr(live['agepreg'])
bins = np.arange(10, 48, 3)
indices = np.digitize(live.agepreg, bins)
groups = live.groupby(indices)
ages = [group.agepreg.mean() for i, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.totalwgt_lb) for i, group in groups][1:-1]
thinkplot.PrePlot(5)
for percent in [90, 75, 50, 25, 10]:
weights = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(ages, weights, label=label)
thinkplot.Config(xlabel="mother's age (years)",
ylabel='birth weight (lbs)',
xlim=[14, 50],
legend=True)
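# Hedged alternative sketch (assumption: same 3-year bins as above): per-bin
# medians via a plain pandas groupby instead of thinkstats2 CDFs.
live.groupby(indices)['totalwgt_lb'].median()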
def ReadFemResp(dct_file='2002FemResp.dct',
dat_file='2002FemResp.dat.gz',
nrows=None):
    """Reads the NSFG respondent data.

    dct_file: string file name
    dat_file: string file name

    returns: DataFrame
    """
dct = thinkstats2.ReadStataDct(dct_file)
df = dct.ReadFixedWidth(dat_file, compression='gzip', nrows=nrows)
return df
resp = ReadFemResp()
resp.shape
resp.index = resp.caseid
resp.head()
join = live.join(resp, on='caseid', rsuffix='_r')
join.shape
join.screentime.head()
join.screentime = pd.to_datetime(join.screentime)
join.screentime.head()
ages = join.agepreg
weights = join.totalwgt_lb
inter, slope = thinkstats2.LeastSquares(ages, weights)
inter, slope, slope*16*10
import statsmodels.formula.api as smf
formula = ('totalwgt_lb ~ agepreg')
results = smf.ols(formula, data=join).fit()
results.summary()
inter, slope = results.params
inter, slope
slope * 16 * 10 # slope in ounces per decade
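# Hedged cross-check (assumption: should agree with the OLS fit above);
# np.polyfit returns the highest-degree coefficient first.
slope_np, inter_np = np.polyfit(ages, weights, 1)
inter_np, slope_np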
join['isfirst'] = (join.birthord == 1)
formula = 'totalwgt_lb ~ isfirst'
results = smf.ols(formula, data=join).fit()
results.summary()
results.params['isfirst[T.True]'] * 16
formula = 'totalwgt_lb ~ agepreg + isfirst'
results = smf.ols(formula, data=join).fit()
results.summary()
results.params['isfirst[T.True]'] * 16
join['age2'] = join.agepreg**2
formula = 'totalwgt_lb ~ agepreg + age2'
results = smf.ols(formula, data=join).fit()
results.summary()
formula = 'totalwgt_lb ~ agepreg + age2 + isfirst'
results = smf.ols(formula, data=join).fit()
results.summary()
results.params['isfirst[T.True]'] * 16
formula = ('totalwgt_lb ~ agepreg + age2 + C(race) + '
'nbrnaliv>1 + paydu==1 + totincr')
results = smf.ols(formula, data=join).fit()
results.summary()
live['isboy'] = (live.babysex==1).astype(int)
model = smf.logit('isboy ~ agepreg', data=live)
results = model.fit()
results.summary()
log_odds_ratio = results.params['agepreg'] * 10
odds_ratio = np.exp(log_odds_ratio)
odds_ratio
p = 0.51
prior_odds = p / (1-p)
post_odds = prior_odds * odds_ratio
p = post_odds / (post_odds + 1)
p
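# Hedged helper sketch (an addition): wrap the odds update so other priors
# are easy to try.
def update_prob(prior_p, odds_ratio_):
    odds = prior_p / (1 - prior_p) * odds_ratio_
    return odds / (odds + 1)
update_prob(0.51, odds_ratio)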
formula = 'isboy ~ agepreg + hpagelb + birthord + C(race)'
model = smf.logit(formula, data=live)
results = model.fit()
results.summary()
exog = pd.DataFrame(model.exog, columns=model.exog_names)
endog = pd.DataFrame(model.endog, columns=[model.endog_names])
actual = endog['isboy']
baseline = actual.mean()
baseline
predict = (results.predict() >= 0.5)
true_pos = predict * actual
true_neg = (1 - predict) * (1 - actual)
acc = (sum(true_pos) + sum(true_neg)) / len(actual)
acc
columns = ['agepreg', 'hpagelb', 'birthord', 'race']
new = pd.DataFrame([[39, 30, 1, 2]], columns=columns)
y = results.predict(new)
y
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's first look at two wine characteristics
Step2: PCA on a Subset of the Wine Data
Step3: Let's visualize the normalized data and its principal components.
Step4: Let's transform the normalized data to the principal component space
Step5: Homework $1$
Step6: We consider a toy document collection (corpus) and a query for this tutorial.
Step7: We now build a term frequency (TF) matrix from the corpus using the Python sklearn package.
Step8: Let's look at the corpus vocabulary terms.
Step9: TF-IDF
Step10: Information Retrieval via TF-IDF
Step11: Latent Semantic Analysis (LSA)
Step12: Information Retrieval via LSA
|
<ASSISTANT_TASK:>
Python Code:
# We will first read the wine data headers
with open("wine.data") as f:
    header = f.readline()
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg as la
# Read the data file (text format): wine.data, delimiter=',', use columns 0, 1, 10, skip the header
wine_class, wine_alc, wine_col = np.loadtxt("wine.data", delimiter=',', usecols=(0, 1, 10), unpack=True, skiprows=1)
# draw a scatter plot of wine color intensity and alcohol content
# Perform PCA on two wine characteristics: **Alcohol Content** and **Color Intensity**
col_alc = np.matrix([wine_col, wine_alc]).T
m, n = col_alc.shape
# compute column means
# center the data with column means
# calculate the covariance matrix
# calculate eigenvectors & eigenvalues of the covariance matrix
# sort eigenvalues and eigenvectors in decreasing order
# Create a scatter plot of the normalized data
# color intensity of the x-axis and alcohol content on the y-axis
# Plot the principal component line
# the PCA transformation
# Plot the data points in the new space
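# One possible (hedged) sketch of the eigen-decomposition steps outlined
# above; the names are illustrative and this is not the assigned solution.
centered = np.asarray(col_alc) - np.asarray(col_alc).mean(axis=0)
cov = np.cov(centered, rowvar=False)             # 2 x 2 covariance matrix
evals, evecs = la.eigh(cov)                      # eigen-decomposition
order = np.argsort(evals)[::-1]                  # decreasing eigenvalue order
evals, evecs = evals[order], evecs[:, order]
projected = centered.dot(evecs)                  # data in PC space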
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from scipy.spatial.distance import cosine
corpus = [
"Romeo and Juliet.", # document 1
"Juliet: O happy dagger!", # document 2
"Romeo died by dagger.", # document 3
"'Live free or die', that's the New-Hampshire's motto.", # document 4
"Did you know, New-Hampshire is in New-England." # document 5
]
key_words = [
'die',
'dagger'
]
# initialize the countvetorizer class
vectorizer = CountVectorizer(min_df=0, stop_words=None)
# transform the corpus based on the count vectorizer
# print the vocabulary
# A custom stopword list
stop_words = ["a", "an", "the", "and", "in", "by", "or", "did", "you", "is", "that"]
# Here, we assume that we preprocessed the corpus
preprocessed_corpus = [
"Romeo and Juliet",
"Juliet O happy dagger",
"Romeo die by dagger",
"Live free or die that the NewHampshire motto",
"Did you know NewHampshire is in NewEngland"
]
# Customize the vectorizer class
# transform the corpus based on the count vectorizer
# print the vocabulary
# query keywords
key_words = ['die', 'dagger']
# To keep the development simple, we build a composite model for both the corpus and the query
corpus = preprocessed_corpus + [' '.join(key_words)]
# transform the corpus based on the count vectorizer
# TF-IDF transform using TfidfTransformer
# transform the TF matrix to TF-IDF matrix
# D x V document-term matrix
# 1 x V query-term vector
# Find cosine distance b/w the TF-IDF vectors of every document and the query
# Sort them and create the rank list
K = 2 # number of components
# Find cosine distance b/w the TF-IDF vectors of every document and the query
# Sort them and create the rank list
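# One possible (hedged) completion sketch for the retrieval pipeline above;
# all names below are illustrative, not part of the assigned homework.
vec = CountVectorizer(min_df=0, stop_words=None)
tf = vec.fit_transform(corpus)            # corpus has the query as its last row
tfidf = TfidfTransformer().fit_transform(tf).toarray()
docs, query = tfidf[:-1], tfidf[-1]       # D x V matrix and 1 x V query vector
rank = sorted(range(len(docs)), key=lambda d: cosine(docs[d], query))
print(rank)                               # document indices, most similar first
# For the LSA variant, one could first project tfidf to K components (e.g.
# sklearn.decomposition.TruncatedSVD(n_components=K)) and rank the same way.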
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Data inspection
Step3: Feature imputation
Step4: Feature augmentation
Step5: Generate training, validation and test data splits
Step6: Classification parameters optimization
Step7: Predict labels on test data
|
<ASSISTANT_TASK:>
Python Code:
# Import
from __future__ import division
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['figure.figsize'] = (20.0, 10.0)
inline_rc = dict(mpl.rcParams)
from classification_utilities import make_facies_log_plot
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn import preprocessing
from sklearn.model_selection import LeavePGroupsOut
from sklearn.metrics import f1_score
from sklearn.multiclass import OneVsOneClassifier
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from scipy.signal import medfilt
import sys, scipy, sklearn
print('Python: ' + sys.version.split('\n')[0])
print(' ' + sys.version.split('\n')[1])
print('Pandas: ' + pd.__version__)
print('Numpy: ' + np.__version__)
print('Scipy: ' + scipy.__version__)
print('Sklearn: ' + sklearn.__version__)
# Parameters
feature_names = ['GR', 'ILD_log10', 'DeltaPHI', 'PHIND', 'PE', 'NM_M', 'RELPOS']
facies_names = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D', 'PS', 'BS']
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
# Load data from file
data = pd.read_csv('../facies_vectors.csv')
# Store features and labels
X = data[feature_names].values # features
y = data['Facies'].values # labels
# Store well labels and depths
well = data['Well Name'].values
depth = data['Depth'].values
# Define function for plotting feature statistics
def plot_feature_stats(X, y, feature_names, facies_colors, facies_names):
# Remove NaN
nan_idx = np.any(np.isnan(X), axis=1)
X = X[np.logical_not(nan_idx), :]
y = y[np.logical_not(nan_idx)]
# Merge features and labels into a single DataFrame
features = pd.DataFrame(X, columns=feature_names)
labels = pd.DataFrame(y, columns=['Facies'])
    # facies codes in the data are 1-indexed, so start enumerate at 1
    for f_idx, facies in enumerate(facies_names, start=1):
        labels[labels[:] == f_idx] = facies
data = pd.concat((labels, features), axis=1)
# Plot features statistics
facies_color_map = {}
for ind, label in enumerate(facies_names):
facies_color_map[label] = facies_colors[ind]
sns.pairplot(data, hue='Facies', palette=facies_color_map, hue_order=list(reversed(facies_names)))
# Feature distribution
plot_feature_stats(X, y, feature_names, facies_colors, facies_names)
mpl.rcParams.update(inline_rc)
# Facies per well
for w_idx, w in enumerate(np.unique(well)):
ax = plt.subplot(3, 4, w_idx+1)
hist = np.histogram(y[well == w], bins=np.arange(len(facies_names)+1)+.5)
plt.bar(np.arange(len(hist[0])), hist[0], color=facies_colors, align='center')
ax.set_xticks(np.arange(len(hist[0])))
ax.set_xticklabels(facies_names)
ax.set_title(w)
# Features per well
for w_idx, w in enumerate(np.unique(well)):
ax = plt.subplot(3, 4, w_idx+1)
hist = np.logical_not(np.any(np.isnan(X[well == w, :]), axis=0))
plt.bar(np.arange(len(hist)), hist, color=facies_colors, align='center')
ax.set_xticks(np.arange(len(hist)))
ax.set_xticklabels(feature_names)
ax.set_yticks([0, 1])
ax.set_yticklabels(['miss', 'hit'])
ax.set_title(w)
reg = RandomForestRegressor(max_features='sqrt', n_estimators=50)
DataImpAll = data[feature_names].copy()
DataImp = DataImpAll.dropna(axis = 0, inplace=False)
Ximp=DataImp.loc[:, DataImp.columns != 'PE']
Yimp=DataImp.loc[:, 'PE']
reg.fit(Ximp, Yimp)
X[np.array(DataImpAll.PE.isnull()),4] = reg.predict(DataImpAll.loc[DataImpAll.PE.isnull(),:].drop('PE',axis=1,inplace=False))
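# Hedged sanity check (an addition): no NaNs should remain in the PE
# column (feature index 4) after imputation.
assert not np.any(np.isnan(X[:, 4]))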
# Feature windows concatenation function
def augment_features_window(X, N_neig):
# Parameters
N_row = X.shape[0]
N_feat = X.shape[1]
# Zero padding
X = np.vstack((np.zeros((N_neig, N_feat)), X, (np.zeros((N_neig, N_feat)))))
# Loop over windows
X_aug = np.zeros((N_row, N_feat*(2*N_neig+1)))
for r in np.arange(N_row)+N_neig:
this_row = []
for c in np.arange(-N_neig,N_neig+1):
this_row = np.hstack((this_row, X[r+c]))
X_aug[r-N_neig] = this_row
return X_aug
# Feature gradient computation function
def augment_features_gradient(X, depth):
# Compute features gradient
d_diff = np.diff(depth).reshape((-1, 1))
d_diff[d_diff==0] = 0.001
X_diff = np.diff(X, axis=0)
X_grad = X_diff / d_diff
# Compensate for last missing value
X_grad = np.concatenate((X_grad, np.zeros((1, X_grad.shape[1]))))
return X_grad
# Feature augmentation function
def augment_features(X, well, depth, N_neig=1):
# Augment features
X_aug = np.zeros((X.shape[0], X.shape[1]*(N_neig*2+2)))
for w in np.unique(well):
w_idx = np.where(well == w)[0]
X_aug_win = augment_features_window(X[w_idx, :], N_neig)
X_aug_grad = augment_features_gradient(X[w_idx, :], depth[w_idx])
X_aug[w_idx, :] = np.concatenate((X_aug_win, X_aug_grad), axis=1)
# Find padded rows
padded_rows = np.unique(np.where(X_aug[:, 0:7] == np.zeros((1, 7)))[0])
return X_aug, padded_rows
# Augment features
X_aug, padded_rows = augment_features(X, well, depth)
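# Hedged sanity check (an addition): 7 base features become
# 7 * (2*N_neig + 2) = 28 augmented columns for the default N_neig=1.
X_aug.shape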
# Initialize model selection methods
lpgo = LeavePGroupsOut(2)
# Generate splits
split_list = []
for train, val in lpgo.split(X, y, groups=data['Well Name']):
hist_tr = np.histogram(y[train], bins=np.arange(len(facies_names)+1)+.5)
hist_val = np.histogram(y[val], bins=np.arange(len(facies_names)+1)+.5)
if np.all(hist_tr[0] != 0) & np.all(hist_val[0] != 0):
split_list.append({'train':train, 'val':val})
# Print splits
for s, split in enumerate(split_list):
print('Split %d' % s)
print(' training: %s' % (data['Well Name'][split['train']].unique()))
print(' validation: %s' % (data['Well Name'][split['val']].unique()))
# Parameters search grid (uncomment parameters for full grid search... may take a lot of time)
N_grid = [100] # [50, 100, 150]
M_grid = [10] # [5, 10, 15]
S_grid = [25] # [10, 25, 50, 75]
L_grid = [5] # [2, 3, 4, 5, 10, 25]
param_grid = []
for N in N_grid:
for M in M_grid:
for S in S_grid:
for L in L_grid:
param_grid.append({'N':N, 'M':M, 'S':S, 'L':L})
# Train and test a classifier
def train_and_test(X_tr, y_tr, X_v, well_v, param):
# Feature normalization
scaler = preprocessing.RobustScaler(quantile_range=(25.0, 75.0)).fit(X_tr)
X_tr = scaler.transform(X_tr)
X_v = scaler.transform(X_v)
# Train classifier
clf = OneVsOneClassifier(RandomForestClassifier(n_estimators=param['N'], criterion='entropy',
max_features=param['M'], min_samples_split=param['S'], min_samples_leaf=param['L'],
class_weight='balanced', random_state=0), n_jobs=-1)
clf.fit(X_tr, y_tr)
# Test classifier
y_v_hat = clf.predict(X_v)
# Clean isolated facies for each well
for w in np.unique(well_v):
y_v_hat[well_v==w] = medfilt(y_v_hat[well_v==w], kernel_size=5)
return y_v_hat
# For each set of parameters
score_param = []
for param in param_grid:
# For each data split
score_split = []
for split in split_list:
# Remove padded rows
split_train_no_pad = np.setdiff1d(split['train'], padded_rows)
# Select training and validation data from current split
X_tr = X_aug[split_train_no_pad, :]
X_v = X_aug[split['val'], :]
y_tr = y[split_train_no_pad]
y_v = y[split['val']]
# Select well labels for validation data
well_v = well[split['val']]
# Train and test
y_v_hat = train_and_test(X_tr, y_tr, X_v, well_v, param)
# Score
score = f1_score(y_v, y_v_hat, average='micro')
score_split.append(score)
# Average score for this param
score_param.append(np.mean(score_split))
print('F1 score = %.3f %s' % (score_param[-1], param))
# Best set of parameters
best_idx = np.argmax(score_param)
param_best = param_grid[best_idx]
score_best = score_param[best_idx]
print('\nBest F1 score = %.3f %s' % (score_best, param_best))
# Load data from file
test_data = pd.read_csv('../validation_data_nofacies.csv')
# Prepare training data
X_tr = X
y_tr = y
# Augment features
X_tr, padded_rows = augment_features(X_tr, well, depth)
# Removed padded rows
X_tr = np.delete(X_tr, padded_rows, axis=0)
y_tr = np.delete(y_tr, padded_rows, axis=0)
# Prepare test data
well_ts = test_data['Well Name'].values
depth_ts = test_data['Depth'].values
X_ts = test_data[feature_names].values
# Augment features
X_ts, padded_rows = augment_features(X_ts, well_ts, depth_ts)
# Predict test labels
y_ts_hat = train_and_test(X_tr, y_tr, X_ts, well_ts, param_best)
# Save predicted labels
test_data['Facies'] = y_ts_hat
test_data.to_csv('ar4_predicted_facies_submission002.csv')
# Plot predicted labels
make_facies_log_plot(
test_data[test_data['Well Name'] == 'STUART'],
facies_colors=facies_colors)
make_facies_log_plot(
test_data[test_data['Well Name'] == 'CRAWFORD'],
facies_colors=facies_colors)
mpl.rcParams.update(inline_rc)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncc', 'sandbox-2', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sequences to Query
Step2: Motifs
Step3: Position Specific scoring matrix
Step4: Searching for TFBS
Step5: Test Motifs present
|
<ASSISTANT_TASK:>
Python Code:
from Bio import motifs
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC, generic_dna, generic_protein
import pandas as pd
import os, sys
for seq_record in SeqIO.parse("../data/fasta/ludwig_eve-striped-2.fasta", "fasta"):
print(seq_record.id)
print(repr(seq_record.seq))
print(len(seq_record))
records = list(SeqIO.parse("../data/fasta/ludwig_eve-striped-2.fasta", "fasta"))
# Use only the first record
seqTest = records[0]
print(seqTest.seq)
print(type(seqTest))
str(seqTest.id)
# Rebuild the sequence as a Seq with an explicit unambiguous-DNA alphabet;
# str() extracts the plain sequence string from the SeqRecord's Seq object.
seqTest = Seq(str(seqTest.seq), IUPAC.IUPACUnambiguousDNA())
print(seqTest)
print(type(seqTest))
bcd = motifs.read(open("../data/PWM/transpose_fm/bcd_FlyReg.fm"),"pfm")
print(bcd.counts)
# Normalizing the counts yields a PWM object, which is needed before the
# position-specific scoring matrix (PSSM) can be derived. Pseudocounts are
# set to zero here; they could instead be tuned to the actual sequence set
# or to the Drosophila genome's background composition.
pwm = bcd.counts.normalize(pseudocounts=0.0)
print(pwm)
print(type(pwm))
print(vars(pwm))
dir(pwm)
sys.getsizeof(pwm)
pssm = pwm.log_odds()
print(id(pssm.alphabet)) # id() gives a unique hash. Good to see when data changes.
print(id(pssm))
print(pssm)
print(type(pssm))
print(pssm.alphabet)
#pssm #If you ask for the raw
# Forward
pssm.calculate(seqTest)
# Reverse
rpssm = pssm.reverse_complement()
# rpssm.calculate(seqTest) #You can see the calculations
print("Background: %f" % bcd.pssm.mean(bcd.background))
distribution = pssm.distribution(background=bcd.background, precision=10**4)
#print(type(distribution)) #check
#print(distribution) #check
dir(distribution) #there is a threshold for patser.
# that is what we should use.
threshold = distribution.threshold_patser()
print("Patser Threshold %5.3f" % threshold)
for position, score in pssm.search(seqTest, threshold=6):
print("Position %d: score = %5.3f" % (position, score))
# initialise a list to collect hit positions
allPos = []
print(seqTest)
# collect the positions of all hits above the Patser threshold
for position, score in pssm.search(seqTest, threshold=3.262):
allPos.append(position)
print(type(allPos))
# motif length, used below to extract hit sites (should equal len(bcd))
m = 7
print(len(seqTest))
print(seqTest[0:10]) # first 10 nucleotides
print(seqTest[885:898]) # last 10 nucleotides
print(seqTest[-13:-1]) # with negative numbers, which go from last position.
# But you cannot use zero. How would you identify the last nucleotide?
print(seqTest[-1]) # the last nucleotide (answering the question above)
print(seqTest[-2])
print(seqTest[-3])
for pos in allPos:
print(seqTest[pos:pos+m]) # this will find and print the motifs
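# Hedged sketch (not part of the original notebook): collect all hits into a
# pandas DataFrame; negative search positions denote reverse-strand matches.
hits = []
for pos, score in pssm.search(seqTest, threshold=threshold):
    strand = '+' if pos >= 0 else '-'
    start = pos if pos >= 0 else len(seqTest) + pos
    hits.append((start, strand, float(score), str(seqTest[start:start + m])))
pd.DataFrame(hits, columns=['start', 'strand', 'score', 'site'])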
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Load python class and set required resolution
Step2: 2. Regrid DEM file
Step3: 3. Regrid Rain file
Step4: 4. Regrid Tectonic files
Step5: 3D displacements file
|
<ASSISTANT_TASK:>
Python Code:
import sys
print(sys.version)
print(sys.executable)
%matplotlib inline
# Import badlands grid generation toolbox
import pybadlands_companion.resizeInput as resize
#help(resize.resizeInput.__init__)
newRes = resize.resizeInput(requestedSpacing = 40)
#help(newRes.regridDEM)
newRes.regridDEM(inDEM='mountain/data/nodes.csv',outDEM='mountain/data/newnodes.csv')
#help(newRes.regridRain)
newRes.regridRain(inRain='data/rain.csv',outRain='newrain.csv')
#help(newRes.regridTecto)
newRes.regridTecto(inTec='data/disp.csv', outTec='newdisp.csv')
#help(newRes.regridDisp)
newRes.regridDisp(inDisp='data/disp.csv', outDisp='newdisp.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Computing the forward operator
Step2: Visualizing the coregistration
Step3: Compute Source Space
Step4: The surface based source space src contains two parts, one for the left hemisphere and one for the right hemisphere.
Step5: To compute a volume based source space defined with a grid of candidate dipoles inside a sphere (here radius 90 mm, centered at (0, 0, 40) mm), pass a sphere definition.
Step6: To compute a volume based source space defined with a grid of candidate dipoles bounded by a given surface (here the inner skull), pass the surface file.
Step7: With the surface-based source space, only sources that lie in the plotted MRI slices are shown.
Step8: Compute forward solution
Step9: Note that the
Step10: We can explore the content of fwd to access the numpy array that contains the gain matrix (leadfield).
Step11: To extract the numpy array containing the forward operator corresponding to
Step12: This is equivalent to the following code that explicitly applies the forward operator to a source estimate.
|
<ASSISTANT_TASK:>
Python Code:
import os.path as op
import mne
from mne.datasets import sample
data_path = sample.data_path()
# the raw file containing the channel location + types
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# The paths to Freesurfer reconstructions
subjects_dir = data_path + '/subjects'
subject = 'sample'
mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,
brain_surfaces='white', orientation='coronal')
# The transformation file obtained by coregistration
trans = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
info = mne.io.read_info(raw_fname)
# Here we look at the dense head, which isn't used for BEM computations but
# is useful for coregistration.
mne.viz.plot_alignment(info, trans, subject=subject, dig=True,
meg=['helmet', 'sensors'], subjects_dir=subjects_dir,
surfaces='head-dense')
src = mne.setup_source_space(subject, spacing='oct6',
subjects_dir=subjects_dir, add_dist=False)
print(src)
mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,
brain_surfaces='white', src=src, orientation='coronal')
sphere = (0.0, 0.0, 40.0, 90.0)
vol_src = mne.setup_volume_source_space(subject, subjects_dir=subjects_dir,
sphere=sphere)
print(vol_src)
mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,
brain_surfaces='white', src=vol_src, orientation='coronal')
surface = op.join(subjects_dir, subject, 'bem', 'inner_skull.surf')
vol_src = mne.setup_volume_source_space(subject, subjects_dir=subjects_dir,
surface=surface)
print(vol_src)
mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,
brain_surfaces='white', src=vol_src, orientation='coronal')
import numpy as np # noqa
from mayavi import mlab # noqa
from surfer import Brain # noqa
brain = Brain('sample', 'lh', 'inflated', subjects_dir=subjects_dir)
surf = brain.geo['lh']
vertidx = np.where(src[0]['inuse'])[0]
mlab.points3d(surf.x[vertidx], surf.y[vertidx],
surf.z[vertidx], color=(1, 1, 0), scale_factor=1.5)
conductivity = (0.3,) # for single layer
# conductivity = (0.3, 0.006, 0.3) # for three layers
model = mne.make_bem_model(subject='sample', ico=4,
conductivity=conductivity,
subjects_dir=subjects_dir)
bem = mne.make_bem_solution(model)
fwd = mne.make_forward_solution(raw_fname, trans=trans, src=src, bem=bem,
meg=True, eeg=False, mindist=5.0, n_jobs=2)
print(fwd)
leadfield = fwd['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
fwd_fixed = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=True)
leadfield = fwd_fixed['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
n_dipoles = leadfield.shape[1]
vertices = [src_hemi['vertno'] for src_hemi in fwd_fixed['src']]
stc = mne.SourceEstimate(1e-9 * np.eye(n_dipoles), vertices, tmin=0., tstep=1)
leadfield = mne.apply_forward(fwd_fixed, stc, info).data / 1e-9
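# Hedged check (an addition, not in the tutorial): both routes should give
# the same leadfield, up to numerical precision and matching channel
# selection.
np.allclose(leadfield, fwd_fixed['sol']['data'])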
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup the simulation
Step2: Even for small samples, the sampling distribution of the mean is well behaved
Step3: As one would hope, the sample means appear to be centered around the true mean, regardless of sample size. The standard error of these distributions decreases with increasing sample size as we would expect.
Step4: Here's an alternative way to print this table by creating it as a Pandas DataFrame.
Step5: For small samples, sample standard deviations systematically underestimate the population standard deviation
Step6: Uh oh! There's a very clear indication that the sampling distribution of standard deviations is not centered around 1 for $n=3$, and it looks like that may be the case for $n=5$.
Step7: The above plot suggests that for samples where $n < 30$ we tend to get biased estimates of the population standard deviation (even using our standard "unbiased estimator"). However, for samples of $n \geq 30$ the bias is small or non-existent.
Step8: Examining the plots above, we see that for the smallest sample sizes the observed z-scores of the mean have much heavier tails than we predict. As sample size increases, this effect becomes less and less noticeable, such that by the time $n=30$ the expected normal PDF fits the observed z-scores very well. The heavy tails at small $n$ are driven by the fact that small samples tend to underestimate the population standard deviation.
Step9: More comparisons of the t- and normal distributions
|
<ASSISTANT_TASK:>
Python Code:
# standard imports
%matplotlib inline
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('bmh')
# setup the popn distribution we're sampling from
mu, sigma = 10, 1
distn = stats.norm(loc=mu, scale=sigma)
# list of sample sizes we'll generate
ssizes = [2, 3, 4, 5, 7, 10, 20, 30]
# number of simulations to carry out *for each sample size*
nsims = 1000
# Use a list comprehension to generate the simulated samples
# for each choice of sample size. We could also have done this
# in a for-loop. I personally find the list comprehension
# more readable but YMMV.
#
# The resulting list is filled with arrays of different sizes,
# with sizes corresponding to the ssizes list above
# samples = [ [2 x 1000 array], [3 x 1000 array], ... , [30 x 1000 array] ]
samples = [distn.rvs(size=(i, nsims)) for i in ssizes]
# calculate the means, std devs, std errors, and z-scores for each sample.
# For the first two calculations we use the argument "axis = 0" to indicate that
# we want to do the calculations "column-wise" (across the rows) of the
# arrays in samples.
#
# Again we use list comprehensions instead of for-loops for compactness.
smeans = [np.mean(i, axis=0) for i in samples] # sample means
sstds = [np.std(i, axis=0, ddof=1) for i in samples] # sample std devs
# When calculating the sample std errors, since we need two pieces of information,
# we use the zip function to bundle the std deviations and sample sizes into a
# tuple. Read the Python library description of `zip` to make sure you understand
# this function as it's very useful.
sse = [sstd/np.sqrt(size) for (sstd, size) in zip(sstds, ssizes)]
# Finally we calculate z-scores for the observed means. We do the same
# bundling trick with zip that we used for calculatings SEs
zscores = [(mean-mu)/SE for (mean, SE) in zip (smeans, sse)]
for i, size in enumerate(ssizes):
plt.hist(smeans[i], alpha=0.5, bins=50, normed=True,
histtype='stepfilled',label = "n = %d" % size)
plt.xlabel("Sample means")
plt.ylabel("Density")
plt.legend(loc='best')
plt.title("Sampling Distributions of the Mean\nfor samples of size n")
pass
hdrfmt = "{:^7}\t{:^18}"
print(hdrfmt.format("sample", "Mean of Sampling"))
print(hdrfmt.format("size", "Distn of the Mean"))
print(hdrfmt.format("="*7, "="*18))
fmt = "{:>7d}\t{:>18.3f}"
for i in range(len(ssizes)):
print(fmt.format(ssizes[i], np.mean(smeans[i])))
import pandas as pd
d = pd.DataFrame()
d["sample_size"] = ssizes
d["mean_of_means"] = [np.mean(i) for i in smeans]
d
idx3 = ssizes.index(3) # do you know what index does? if not, look it up!
idx5 = ssizes.index(5)
idx30 = ssizes.index(30)
ss = [3, 5, 30]
idxs = [idx3, idx5, idx30]  # these indices must line up with the sizes in ss
fig, axes = plt.subplots(1, 3)
fig.set_size_inches(15,6)
for i in range(len(ss)):
ax = axes[i]
ax.hist(sstds[idxs[i]], alpha=0.5, bins=21, normed=True,
histtype='stepfilled',label = "n = %d" % ss[i])
ax.set_xlabel("Sample standard deviations")
ax.set_ylabel("Density")
ax.legend(loc='best')
fig.suptitle("Sampling Distributions of the Std Dev\nfor samples of size n",
fontsize=14)
pass
expected_stds = np.ones_like(ssizes) # all expected standard deviations are 1
mean_stds = [np.mean(i) for i in sstds]
plt.plot(ssizes, expected_stds, linestyle='dashed', color='black',
label="Expected")
plt.plot(ssizes, mean_stds, marker='o', color='red',
label="Observed")
plt.xlabel("Sample Size")
plt.ylabel("Mean of Sampling Distribution of Std Dev")
plt.xlim(0, max(ssizes)*1.1)
plt.ylim(0, sigma * 1.1)
plt.legend(loc='best')
pass
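# Hedged summary (an addition, not in the source): relative bias of the mean
# sample SD at each sample size.
for n, msd in zip(ssizes, mean_stds):
    print("n = {:2d}: mean sample SD = {:.4f} (bias {:+.2%})".format(
        n, msd, (msd - sigma) / sigma))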
fig, plotmtx = plt.subplots(nrows=4, ncols=2)
fig.set_size_inches(12,20)
x = np.linspace(-10, 10, 200)
expected_pdf = stats.norm.pdf(x)
# plot first 8 distributions of z-scores
ct = 0
for row in plotmtx:
for subplot in row:
subplot.hist(zscores[ct], bins=np.arange(-10,10,0.5),
normed=True, color='gray', alpha=0.5, label="Observed Z-scores")
subplot.plot(x, expected_pdf, color='firebrick',label="Expected Z-scores")
subplot.legend(loc='best', fontsize=9)
subplot.set_title("Z-scores, n = {:d}".format(ssizes[ct]))
ct += 1
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3)
fig.set_size_inches(15,4)
x = np.linspace(-10, 10, 200)
norm_pdf = stats.norm.pdf(x)
z2 = zscores[ssizes.index(2)]
z3 = zscores[ssizes.index(3)]
z10 = zscores[ssizes.index(10)]
ax1.hist(z2, bins=np.arange(-10,10,0.5),
normed=True, color='gray', alpha=0.5, label="Observed")
ax1.plot(x, norm_pdf, color='firebrick', label="normal pdf")
ax1.plot(x, stats.t.pdf(x, df=1), color='steelblue',  # df = n - 1 = 1
         linewidth=3, label="$t_1$ pdf")
ax1.legend(loc='best', fontsize=9)
ax1.set_title("Z-scores, n = 2")
ax2.hist(z3, bins=np.arange(-10,10,0.5),
normed=True, color='gray', alpha=0.5, label="Observed")
ax2.plot(x, norm_pdf, color='firebrick',label="normal pdf")
ax2.plot(x, stats.t.pdf(x, df=2), color='steelblue',  # df = n - 1 = 2
         linewidth=3, label="$t_{2}$ pdf")
ax2.legend(loc='best', fontsize=9)
ax2.set_title("Z-scores, n = 3")
ax3.hist(z10, bins=np.arange(-10,10,0.5),
normed=True, color='gray', alpha=0.5, label="Observed")
ax3.plot(x, norm_pdf, color='firebrick',label="normal pdf")
ax3.plot(x, stats.t.pdf(x, df=9), color='steelblue',  # df = n - 1 = 9
         linewidth=3, label="$t_{9}$ pdf")
ax3.legend(loc='best', fontsize=9)
ax3.set_title("Z-scores, n = 10")
pass
fig = plt.figure()
fig.set_size_inches(10,4)
df = [2, 4, 30]
x = np.linspace(-6, 6, 200)
for i in df:
plt.plot(x, stats.t.pdf(x, df=i), alpha=0.5, label='t({:d})'.format(i))
plt.plot(x, stats.norm.pdf(x), alpha=0.5, linestyle='dashed', label='normal',
color='orange', linewidth=3)
plt.xlabel("Z-score")
plt.ylabel("Density")
plt.legend(loc='best')
pass
fig, (ax1, ax2) = plt.subplots(1,2)
fig.set_size_inches(12,4)
x = np.linspace(-6, 6, 200)
leftx = np.linspace(-6, -2, 100)
tProbLess2 = stats.t.cdf(-2, df=5)
nProbLess2 = stats.norm.cdf(-2)
ax1.plot(x, stats.t.pdf(x, df=5), alpha=0.5, linewidth=3, label='t({:d})'.format(i))
ax1.fill_between(leftx, stats.t.pdf(leftx, df=5), color='gray', alpha=0.75)
ax1.text(-5.5, 0.3, "$P(z \leq -2) = {:.3f}$".format(tProbLess2), fontsize=16)
ax1.set_xlabel("Z-score")
ax1.set_ylabel("Density")
ax2.plot(x, stats.norm.pdf(x), alpha=0.5, label='normal', linewidth=3)
ax2.fill_between(leftx, stats.norm.pdf(leftx), color='gray', alpha=0.75)
ax2.text(-5.5, 0.3, "$P(z \leq -2) = {:.3f}$".format(nProbLess2), fontsize=16)
ax2.set_xlabel("Z-score")
ax2.set_ylabel("Density")
pass
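# Hedged extension (an addition): how fast the t tail probability approaches
# the normal one as the degrees of freedom grow.
for df_ in [2, 5, 10, 30, 100]:
    print("df = {:3d}: P(z <= -2) = {:.4f}".format(df_, stats.t.cdf(-2, df=df_)))
print("normal: P(z <= -2) = {:.4f}".format(stats.norm.cdf(-2)))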
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Button is not used to represent a data type. Instead the button widget is used to handle mouse clicks. The on_click method of the Button can be used to register a function to be called when the button is clicked. The docstring of on_click can be seen below.
Step2: Example
Step3: on_submit
Step4: Traitlet events
Step5: Signatures
Step6: Linking Widgets
Step7: Functions traitlets.link and traitlets.dlink return a Link or DLink object. The link can be broken by calling the unlink method.
Step8: Linking widget attributes from the client side
Step9: Function widgets.jslink returns a Link widget. The link can be broken by calling the unlink method.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import ipywidgets as widgets
print(widgets.Button.on_click.__doc__)
from IPython.display import display
button = widgets.Button(description="Click Me!")
display(button)
def on_button_clicked(b):
print("Button clicked.")
button.on_click(on_button_clicked)
text = widgets.Text()
display(text)
def handle_submit(sender):
print(text.value)
text.on_submit(handle_submit)
print(widgets.Widget.on_trait_change.__doc__)
int_range = widgets.IntSlider()
display(int_range)
def on_value_change(name, value):
print(value)
int_range.on_trait_change(on_value_change, 'value')
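# Hedged note (an addition): newer ipywidgets/traitlets releases prefer
# `observe` over the since-deprecated on_trait_change API; a minimal sketch:
def on_value_change_new(change):
    print(change['new'])
int_range.observe(on_value_change_new, names='value')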
import traitlets
# Create Caption
caption = widgets.Latex(value = 'The values of slider1 and slider2 are synchronized')
# Create IntSlider
slider1 = widgets.IntSlider(description='Slider 1')
slider2 = widgets.IntSlider(description='Slider 2')
# Use trailets to link
l = traitlets.link((slider1, 'value'), (slider2, 'value'))
# Display!
display(caption, slider1, slider2)
# Create Caption
caption = widgets.Latex(value = 'Changes in source values are reflected in target1')
# Create Sliders
source = widgets.IntSlider(description='Source')
target1 = widgets.IntSlider(description='Target 1')
# Use dlink
dl = traitlets.dlink((source, 'value'), (target1, 'value'))
display(caption, source, target1)
# May get an error depending on order of cells being run!
l.unlink()
dl.unlink()
# NO LAG VERSION
caption = widgets.Latex(value = 'The values of range1 and range2 are synchronized')
range1 = widgets.IntSlider(description='Range 1')
range2 = widgets.IntSlider(description='Range 2')
l = widgets.jslink((range1, 'value'), (range2, 'value'))
display(caption, range1, range2)
# NO LAG VERSION
caption = widgets.Latex(value = 'Changes in source_range values are reflected in target_range1')
source_range = widgets.IntSlider(description='Source range')
target_range1 = widgets.IntSlider(description='Target range ')
dl = widgets.jsdlink((source_range, 'value'), (target_range1, 'value'))
display(caption, source_range, target_range1)
l.unlink()
dl.unlink()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's write routines to compute supply and demand as functions of price and parameters
Step2: Next we'll write a function that takes a parameter set and returns a market clearing price via bisection
Step3: Let's test it with the original parameter set, the market clearing price for which was 2.9334. The parameters are
Step4: Let's see this visually. First we import the plotting library matplotlib in the standard way
Step5: The next command is a Jupyter "line magic" that tells Jupyter to display figures in the browser
Step6: Now let's plot supply and demand on a grid of points
Step7: Now let's output market clearing prices for all parameter configurations given in exercise 1 of the homework.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy.optimize import bisect
def supply(price, b):
return np.exp(b * price) - 1
def demand(price, a, epsilon):
return a * price**(-epsilon)
def compute_equilibrium(a, b, epsilon):
plow = 0.1
phigh = 10.0
def excess_supply(price):
return supply(price, b) - demand(price, a, epsilon)
pclear = bisect(excess_supply, plow, phigh)
return pclear
compute_equilibrium(1, 0.1, 1)
import matplotlib.pyplot as plt
%matplotlib inline
grid_size = 100
grid = np.linspace(2, 4, grid_size)
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(grid, demand(grid, 1, 1), 'b-', label='demand')
ax.plot(grid, supply(grid, 0.1), 'g-', label='supply')
ax.set_xlabel('price', fontsize=14)
ax.set_ylabel('quantity', fontsize=14)
ax.legend(loc='upper center', frameon=False)
parameter_list = [[1, 0.1, 1],
[2, 0.1, 1],
[1, 0.2, 1],
[1, 0.1, 2]]
for parameters in parameter_list:
print("Price = {}".format(compute_equilibrium(*parameters)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Defining your own magic
Step3: Exercise
Step6: Cell Magic
Step7: Exercise
Step9: Executing Notebooks
Step10: A notebook is just a dictionary with attribute access for convenience.
Step11: We can see all the cells and their type
Step12: Now I can run all of the code cells with get_ipython().run_cell
Step13: And we can now use the function that was defined in that notebook
Step14: Exercise
|
<ASSISTANT_TASK:>
Python Code:
%lsmagic
import numpy
%timeit A=numpy.random.random((1000,1000))
%%timeit -n 1
A=numpy.random.random((1000,1000))
b = A.sum()
ip = get_ipython()
import time
def sleep_magic(line):
    """A simple function for sleeping"""
t = float(line)
time.sleep(t)
ip.register_magic_function?
ip.register_magic_function(sleep_magic, "line", "sleep")
%sleep 2
%sleep?
%load soln/tictocf.py
import numpy as np
import sys
for p in range(1,4):
N = 10**p
print("N=%i" % N)
sys.stdout.flush()
%tic
A = np.random.random((N,N))
np.linalg.eigvals(A)
%toc
def dummy_cell_magic(line, cell):
    """dummy cell magic for displaying the line and cell it is passed"""
print("line: %r" % line)
print("cell: %r" % cell)
ip.register_magic_function(dummy_cell_magic, "cell", "dummy")
%%dummy this is the line
this
is the
cell
def parse_magic_line(line):
    """parse a magic line into a name and eval'd expression"""
name, values_s = line.split(None, 1)
values = eval(values_s, get_ipython().user_ns)
return name, values
parse_magic_line("x range(5)")
%load soln/scalemagic.py
%%scale N [ int(10**p) for p in range(1,4) ]
A = np.random.random((N,N))
np.linalg.eigvals(A)
%%scale N [ int(2**p) for p in np.linspace(6, 11, 11) ]
A = np.random.random((N,N))
np.linalg.eigvals(A)
import io
import os
import IPython.nbformat as nbf
def load_notebook(filename):
    """load a notebook object from a filename"""
if not os.path.exists(filename) and not filename.endswith(".ipynb"):
filename = filename + ".ipynb"
with io.open(filename) as f:
return nbf.read(f, as_version=4)
nb = load_notebook("_Sample")
nb.keys()
cells = nb.cells
cells
for cell in cells:
print()
print('----- %s -----' % cell.cell_type)
print(cell.source)
for cell in cells:
ip = get_ipython()
if cell.cell_type == 'code':
ip.run_cell(cell.source, silent=True)
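# nb_info is called below but never defined in these cells (the original
# notebook presumably loads it from a solution file); a minimal hypothetical
# stand-in so the call runs:
def nb_info(nb):
    """Print a one-line summary of each cell in a notebook object."""
    for i, cell in enumerate(nb.cells):
        first_line = cell.source.splitlines()[0] if cell.source else ''
        print("%2i %-8s %s" % (i, cell.cell_type, first_line))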
nb_info(nb)
%load soln/nbrun.py
%nbrun _Sample
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Factor to downsample the temporal dimension of the TFR computed by tfr_morlet
Step3: Compute statistic
Step4: View time-frequency plots
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
tmin, tmax = -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
ch_name = 'MEG 1332' # restrict example to one channel
# Load condition 1
reject = dict(grad=4000e-13, eog=150e-6)
event_id = 1
epochs_condition_1 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject, preload=True)
epochs_condition_1.pick_channels([ch_name])
# Load condition 2
event_id = 2
epochs_condition_2 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject, preload=True)
epochs_condition_2.pick_channels([ch_name])
decim = 2
freqs = np.arange(7, 30, 3) # define frequencies of interest
n_cycles = 1.5
tfr_epochs_1 = tfr_morlet(epochs_condition_1, freqs,
n_cycles=n_cycles, decim=decim,
return_itc=False, average=False)
tfr_epochs_2 = tfr_morlet(epochs_condition_2, freqs,
n_cycles=n_cycles, decim=decim,
return_itc=False, average=False)
tfr_epochs_1.apply_baseline(mode='ratio', baseline=(None, 0))
tfr_epochs_2.apply_baseline(mode='ratio', baseline=(None, 0))
epochs_power_1 = tfr_epochs_1.data[:, 0, :, :] # only 1 channel as 3D matrix
epochs_power_2 = tfr_epochs_2.data[:, 0, :, :] # only 1 channel as 3D matrix
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([epochs_power_1, epochs_power_2],
n_permutations=100, threshold=threshold, tail=0)
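# Optional summary (an added check): count clusters below the 0.05 level.
n_significant = np.sum(np.array(cluster_p_values) <= 0.05)
print('Found %d significant cluster(s) out of %d' % (n_significant, len(clusters)))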
times = 1e3 * epochs_condition_1.times # change unit to ms
evoked_condition_1 = epochs_condition_1.average()
evoked_condition_2 = epochs_condition_2.average()
plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
T_obs_plot[c] = T_obs[c]
plt.imshow(T_obs,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', cmap='gray')
plt.imshow(T_obs_plot,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', cmap='RdBu_r')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Induced power (%s)' % ch_name)
ax2 = plt.subplot(2, 1, 2)
evoked_contrast = mne.combine_evoked([evoked_condition_1, evoked_condition_2],
weights=[1, -1])
evoked_contrast.plot(axes=ax2, time_unit='s')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First download the file and load it in memory to view it. Note that it is not necessary to load the file in memory in order to perform motion correction; here we load it only to inspect it. Viewing the file uses OpenCV and will open a new window. To exit, click on the video and press q.
Step2: Now set some parameters that are used for motion correction.
Step3: Note that here the data presented here has been downsampled in space by a factor of 2 to reduce the file size. As a result the spatial resolution is coarser here (around 2 microns per pixel). If we were operating at the original resolution, several of the parameters above, e.g., max_shifts, strides, overlaps, max_deviation_rigid, could have been larger by a factor of 2.
Step4: We first need to create a motion correction object with the parameters specified above. We pass directly its input arguments in the constructor below. Alternatively, we can use the params object and construct it by passing the arguments of params.motion. See the notebook demo_pipeline.ipynb for an example of this usage.
Step5: <h1> Rigid motion correction</h1>
Step6: The motion corrected file is automatically saved as a memory mapped file in the location given by mc.mmap_file. The rigid shifts are also saved in mc.shifts_rig.
Step7: plot the shifts computed by rigid registration
Step8: Piecewise rigid registration
Step9: Now concatenate all the movies (raw, rigid, and pw-rigid) for inspection
Step10: From the movie we can see that pw-rigid registration corrected for the non-uniform motion of the data. This was done by estimating different displacement vectors for the different patches in the FOV. This can be visualized by plotting all the computed shifts, where a dispersion in the shifts in the y direction is apparent. In this case, the shifts along the two axes are stored in mc.x_shifts_els and mc.y_shifts_els, respectively.
Step11: The improvement in performance can also be seen by a more crisp summary statistic image. Below we plot the correlation images for the three datasets.
Step12: Quality assessment
Step13: Plot correlation with mean frame for each dataset
|
<ASSISTANT_TASK:>
Python Code:
from builtins import zip
from builtins import str
from builtins import map
from builtins import range
from past.utils import old_div
import cv2
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import psutil
import scipy
from skimage.external.tifffile import TiffFile
import sys
import time
import logging
try:
cv2.setNumThreads(0)
except:
pass
try:
if __IPYTHON__:
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
except NameError:
pass
logging.basicConfig(format=
"%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s",
# filename="/tmp/caiman.log",
level=logging.DEBUG)
import caiman as cm
from caiman.motion_correction import MotionCorrect, tile_and_correct, motion_correction_piecewise
from caiman.utils.utils import download_demo
fnames = 'Sue_2x_3000_40_-46.tif'
fnames = [download_demo(fnames)] # the file will be downloaded if it doesn't already exist
m_orig = cm.load_movie_chain(fnames)
downsample_ratio = .2 # motion can be perceived better when downsampling in time
m_orig.resize(1, 1, downsample_ratio).play(q_max=99.5, fr=30, magnification=2) # play movie (press q to exit)
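# Quick sanity check (optional addition): movie dimensions as (frames, x, y).
print(m_orig.shape)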
max_shifts = (6, 6) # maximum allowed rigid shift in pixels (view the movie to get a sense of motion)
strides = (48, 48) # create a new patch every x pixels for pw-rigid correction
overlaps = (24, 24)  # overlap between patches (size of patch = strides + overlaps)
num_frames_split = 100 # length in frames of each chunk of the movie (to be processed in parallel)
max_deviation_rigid = 3 # maximum deviation allowed for patch with respect to rigid shifts
pw_rigid = False # flag for performing rigid or piecewise rigid motion correction
shifts_opencv = True # flag for correcting motion using bicubic interpolation (otherwise FFT interpolation is used)
border_nan = 'copy' # replicate values along the boundary (if True, fill in with NaN)
#%% start the cluster (if a cluster already exists terminate it)
if 'dview' in locals():
cm.stop_server(dview=dview)
c, dview, n_processes = cm.cluster.setup_cluster(
backend='local', n_processes=None, single_thread=False)
# create a motion correction object
mc = MotionCorrect(fnames, dview=dview, max_shifts=max_shifts,
strides=strides, overlaps=overlaps,
max_deviation_rigid=max_deviation_rigid,
shifts_opencv=shifts_opencv, nonneg_movie=True,
border_nan=border_nan)
%%capture
# correct for rigid motion correction and save the file (in memory mapped form)
mc.motion_correct(save_movie=True)
# load motion corrected movie
m_rig = cm.load(mc.mmap_file)
bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(np.int)
#%% visualize templates
plt.figure(figsize = (20,10))
plt.imshow(mc.total_template_rig, cmap = 'gray')
#%% inspect movie
m_rig.resize(1, 1, downsample_ratio).play(
q_max=99.5, fr=30, magnification=2, bord_px = 0*bord_px_rig) # press q to exit
#%% plot rigid shifts
plt.close()
plt.figure(figsize = (20,10))
plt.plot(mc.shifts_rig)
plt.legend(['x shifts','y shifts'])
plt.xlabel('frames')
plt.ylabel('pixels')
%%capture
#%% motion correct piecewise rigid
mc.pw_rigid = True # turn the flag to True for pw-rigid motion correction
mc.template = mc.mmap_file # use the template obtained before to save in computation (optional)
mc.motion_correct(save_movie=True, template=mc.total_template_rig)
m_els = cm.load(mc.fname_tot_els)
m_els.resize(1, 1, downsample_ratio).play(
q_max=99.5, fr=30, magnification=2,bord_px = bord_px_rig)
cm.concatenate([m_orig.resize(1, 1, downsample_ratio) - mc.min_mov*mc.nonneg_movie,
m_rig.resize(1, 1, downsample_ratio), m_els.resize(
1, 1, downsample_ratio)], axis=2).play(fr=60, q_max=99.5, magnification=2, bord_px=bord_px_rig)
#%% visualize elastic shifts
plt.close()
plt.figure(figsize = (20,10))
plt.subplot(2, 1, 1)
plt.plot(mc.x_shifts_els)
plt.ylabel('x shifts (pixels)')
plt.subplot(2, 1, 2)
plt.plot(mc.y_shifts_els)
plt.ylabel('y_shifts (pixels)')
plt.xlabel('frames')
#%% compute borders to exclude
bord_px_els = np.ceil(np.maximum(np.max(np.abs(mc.x_shifts_els)),
np.max(np.abs(mc.y_shifts_els)))).astype(np.int)
plt.figure(figsize = (20,10))
plt.subplot(1,3,1); plt.imshow(m_orig.local_correlations(eight_neighbours=True, swap_dim=False))
plt.subplot(1,3,2); plt.imshow(m_rig.local_correlations(eight_neighbours=True, swap_dim=False))
plt.subplot(1,3,3); plt.imshow(m_els.local_correlations(eight_neighbours=True, swap_dim=False))
cm.stop_server(dview=dview) # stop the server
%%capture
#% compute metrics for the results (TAKES TIME!!)
final_size = np.subtract(mc.total_template_els.shape, 2 * bord_px_els) # remove pixels in the boundaries
winsize = 100
swap_dim = False
resize_fact_flow = .2 # downsample for computing ROF
tmpl_rig, correlations_orig, flows_orig, norms_orig, crispness_orig = cm.motion_correction.compute_metrics_motion_correction(
fnames[0], final_size[0], final_size[1], swap_dim, winsize=winsize, play_flow=False, resize_fact_flow=resize_fact_flow)
tmpl_rig, correlations_rig, flows_rig, norms_rig, crispness_rig = cm.motion_correction.compute_metrics_motion_correction(
mc.fname_tot_rig[0], final_size[0], final_size[1],
swap_dim, winsize=winsize, play_flow=False, resize_fact_flow=resize_fact_flow)
tmpl_els, correlations_els, flows_els, norms_els, crispness_els = cm.motion_correction.compute_metrics_motion_correction(
mc.fname_tot_els[0], final_size[0], final_size[1],
swap_dim, winsize=winsize, play_flow=False, resize_fact_flow=resize_fact_flow)
plt.figure(figsize = (20,10))
plt.subplot(211); plt.plot(correlations_orig); plt.plot(correlations_rig); plt.plot(correlations_els)
plt.legend(['Original','Rigid','PW-Rigid'])
plt.subplot(223); plt.scatter(correlations_orig, correlations_rig); plt.xlabel('Original');
plt.ylabel('Rigid'); plt.plot([0.3,0.7],[0.3,0.7],'r--')
axes = plt.gca(); axes.set_xlim([0.3,0.7]); axes.set_ylim([0.3,0.7]); plt.axis('square');
plt.subplot(224); plt.scatter(correlations_rig, correlations_els); plt.xlabel('Rigid');
plt.ylabel('PW-Rigid'); plt.plot([0.3,0.7],[0.3,0.7],'r--')
axes = plt.gca(); axes.set_xlim([0.3,0.7]); axes.set_ylim([0.3,0.7]); plt.axis('square');
# print crispness values
print('Crispness original: '+ str(int(crispness_orig)))
print('Crispness rigid: '+ str(int(crispness_rig)))
print('Crispness elastic: '+ str(int(crispness_els)))
#%% plot the results of Residual Optical Flow
fls = [mc.fname_tot_els[0][:-4] + '_metrics.npz', mc.fname_tot_rig[0][:-4] +
'_metrics.npz', mc.fname[0][:-4] + '_metrics.npz']
plt.figure(figsize = (20,10))
for cnt, fl, metr in zip(range(len(fls)),fls,['pw_rigid','rigid','raw']):
with np.load(fl) as ld:
print(ld.keys())
print(fl)
print(str(np.mean(ld['norms'])) + '+/-' + str(np.std(ld['norms'])) +
' ; ' + str(ld['smoothness']) + ' ; ' + str(ld['smoothness_corr']))
plt.subplot(len(fls), 3, 1 + 3 * cnt)
plt.ylabel(metr)
try:
mean_img = np.mean(
cm.load(fl[:-12] + 'mmap'), 0)[12:-12, 12:-12]
except:
try:
mean_img = np.mean(
cm.load(fl[:-12] + '.tif'), 0)[12:-12, 12:-12]
except:
mean_img = np.mean(
cm.load(fl[:-12] + 'hdf5'), 0)[12:-12, 12:-12]
lq, hq = np.nanpercentile(mean_img, [.5, 99.5])
plt.imshow(mean_img, vmin=lq, vmax=hq)
plt.title('Mean')
plt.subplot(len(fls), 3, 3 * cnt + 2)
plt.imshow(ld['img_corr'], vmin=0, vmax=.35)
plt.title('Corr image')
plt.subplot(len(fls), 3, 3 * cnt + 3)
#plt.plot(ld['norms'])
#plt.xlabel('frame')
#plt.ylabel('norm opt flow')
#plt.subplot(len(fls), 3, 3 * cnt + 3)
flows = ld['flows']
plt.imshow(np.mean(
np.sqrt(flows[:, :, :, 0]**2 + flows[:, :, :, 1]**2), 0), vmin=0, vmax=0.3)
plt.colorbar()
plt.title('Mean optical flow')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Learn to calculate with seq2seq model
Step2: To check the correctness of your implementation, use the test_generate_equations function
Step3: Finally, we are ready to generate the train and test data for the neural network
Step4: Prepare data for the neural network
Step5: Special symbols
Step7: You could notice that we have added 3 special symbols
Step8: Check that your implementation is correct
Step10: We also need to be able to get back from indices to symbols
Step12: Generating batches
Step13: The function generate_batches will help to generate batches with defined size from given samples.
Step14: To illustrate the result of the implemented functions, run the following cell
Step15: Encoder-Decoder architecture
Step16: Let us use TensorFlow building blocks to specify the network architecture.
Step18: First, we need to create placeholders to specify what data we are going to feed into the network during execution time. For this task we will need
Step20: Now, let us specify the layers of the neural network. First, we need to prepare an embedding matrix. Since we use the same vocabulary for input and output, we need only one such matrix. For tasks with different vocabularies there would be multiple embedding layers.
Step22: Encoder
Step25: Decoder
Step27: In this task we will use sequence_loss, which is a weighted cross-entropy loss for a sequence of logits. Take a moment to understand what your train logits and targets are. Also note that we do not want to take into account loss terms coming from padding symbols, so we will mask them out using weights.
Step29: The last thing to specify is the optimization of the defined loss.
Step30: Congratulations! You have specified all the parts of your network. You may have noticed that we haven't dealt with any real data yet, so what you have written is just a recipe for how the network should function.
Step31: Train the network and predict output
Step32: We implemented two prediction functions
Step33: Run your experiment
Step34: Finally, we are ready to run the training! A good indicator that everything works fine is a decreasing loss during the training. You should expect a loss of approximately 2.7 at the beginning of the training and near 1 after the 10th epoch.
Step35: Evaluate results
|
<ASSISTANT_TASK:>
Python Code:
import random
def generate_equations(allowed_operators, dataset_size, min_value, max_value):
    """Generates pairs of equations and solutions to them.

    Each equation has a form of two integers with an operator in between.
    Each solution is an integer with the result of the operation.

    allowed_operators: list of strings, allowed operators.
    dataset_size: an integer, number of equations to be generated.
    min_value: an integer, min value of each operand.
    max_value: an integer, max value of each operand.

    result: a list of tuples of strings (equation, solution).
    """
sample = []
for _ in range(dataset_size):
######################################
######### YOUR CODE HERE #############
######################################
return sample
def test_generate_equations():
allowed_operators = ['+', '-']
dataset_size = 10
for (input_, output_) in generate_equations(allowed_operators, dataset_size, 0, 100):
if not (type(input_) is str and type(output_) is str):
return "Both parts should be strings."
if eval(input_) != int(output_):
return "The (equation: {!r}, solution: {!r}) pair is incorrect.".format(input_, output_)
return "Tests passed."
print(test_generate_equations())
from sklearn.model_selection import train_test_split
allowed_operators = ['+', '-']
dataset_size = 100000
data = generate_equations(allowed_operators, dataset_size, min_value=0, max_value=9999)
train_set, test_set = train_test_split(data, test_size=0.2, random_state=42)
word2id = {symbol:i for i, symbol in enumerate('^$#+-1234567890')}
id2word = {i:symbol for symbol, i in word2id.items()}
start_symbol = '^'
end_symbol = '$'
padding_symbol = '#'
def sentence_to_ids(sentence, word2id, padded_len):
    """Converts a sequence of symbols to a padded sequence of their ids.

    sentence: a string, input/output sequence of symbols.
    word2id: a dict, a mapping from original symbols to ids.
    padded_len: an integer, a desirable length of the sequence.

    result: a tuple of (a list of ids, an actual length of sentence).
    """
sent_ids = ######### YOUR CODE HERE #############
sent_len = ######### YOUR CODE HERE #############
return sent_ids, sent_len
def test_sentence_to_ids():
sentences = [("123+123", 7), ("123+123", 8), ("123+123", 10)]
expected_output = [([5, 6, 7, 3, 5, 6, 1], 7),
([5, 6, 7, 3, 5, 6, 7, 1], 8),
([5, 6, 7, 3, 5, 6, 7, 1, 2, 2], 8)]
for (sentence, padded_len), (sentence_ids, expected_length) in zip(sentences, expected_output):
output, length = sentence_to_ids(sentence, word2id, padded_len)
if output != sentence_ids:
return("Convertion of '{}' for padded_len={} to {} is incorrect.".format(
sentence, padded_len, output))
if length != expected_length:
return("Convertion of '{}' for padded_len={} has incorrect actual length {}.".format(
sentence, padded_len, length))
return("Tests passed.")
print(test_sentence_to_ids())
def ids_to_sentence(ids, id2word):
    """Converts a sequence of ids to a sequence of symbols.

    ids: a list, indices for the padded sequence.
    id2word: a dict, a mapping from ids to original symbols.

    result: a list of symbols.
    """
return [id2word[i] for i in ids]
def batch_to_ids(sentences, word2id, max_len):
    """Prepares batches of indices.

    Sequences are padded to match the longest sequence in the batch;
    if it's longer than max_len, then max_len is used instead.

    sentences: a list of strings, original sequences.
    word2id: a dict, a mapping from original symbols to ids.
    max_len: an integer, max len of sequences allowed.

    result: a list of lists of ids, a list of actual lengths.
    """
max_len_in_batch = min(max(len(s) for s in sentences) + 1, max_len)
batch_ids, batch_ids_len = [], []
for sentence in sentences:
ids, ids_len = sentence_to_ids(sentence, word2id, max_len_in_batch)
batch_ids.append(ids)
batch_ids_len.append(ids_len)
return batch_ids, batch_ids_len
def generate_batches(samples, batch_size=64):
X, Y = [], []
for i, (x, y) in enumerate(samples, 1):
X.append(x)
Y.append(y)
if i % batch_size == 0:
yield X, Y
X, Y = [], []
if X and Y:
yield X, Y
sentences = train_set[0]
ids, sent_lens = batch_to_ids(sentences, word2id, max_len=10)
print('Input:', sentences)
print('Ids: {}\nSentences lengths: {}'.format(ids, sent_lens))
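# Round-trip check (an added illustration): decode the padded ids back to
# symbols with ids_to_sentence defined above.
for seq in ids:
    print(''.join(ids_to_sentence(seq, id2word)))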
import tensorflow as tf
class Seq2SeqModel(object):
pass
def declare_placeholders(self):
    """Specifies placeholders for the model."""
# Placeholders for input and its actual lengths.
self.input_batch = tf.placeholder(shape=(None, None), dtype=tf.int32, name='input_batch')
self.input_batch_lengths = tf.placeholder(shape=(None, ), dtype=tf.int32, name='input_batch_lengths')
# Placeholders for groundtruth and its actual lengths.
self.ground_truth = ######### YOUR CODE HERE #############
self.ground_truth_lengths = ######### YOUR CODE HERE #############
self.dropout_ph = tf.placeholder_with_default(1.0, shape=[])
self.learning_rate_ph = ######### YOUR CODE HERE #############
Seq2SeqModel.__declare_placeholders = classmethod(declare_placeholders)
def create_embeddings(self, vocab_size, embeddings_size):
    """Specifies embeddings layer and embeds an input batch."""
random_initializer = tf.random_uniform((vocab_size, embeddings_size), -1.0, 1.0)
self.embeddings = ######### YOUR CODE HERE #############
# Perform embeddings lookup for self.input_batch.
self.input_batch_embedded = ######### YOUR CODE HERE #############
Seq2SeqModel.__create_embeddings = classmethod(create_embeddings)
def build_encoder(self, hidden_size):
    """Specifies encoder architecture and computes its output."""
# Create GRUCell with dropout.
encoder_cell = ######### YOUR CODE HERE #############
# Create RNN with the predefined cell.
_, self.final_encoder_state = ######### YOUR CODE HERE #############
Seq2SeqModel.__build_encoder = classmethod(build_encoder)
def build_decoder(self, hidden_size, vocab_size, max_iter, start_symbol_id, end_symbol_id):
    """Specifies decoder architecture and computes the output.

    Uses different helpers:
      - for train: feeding ground truth
      - for inference: feeding generated output

    As a result, self.train_outputs and self.infer_outputs are created.
    Each of them contains two fields:
      rnn_output (predicted logits)
      sample_id (predictions).
    """
# Use start symbols as the decoder inputs at the first time step.
batch_size = tf.shape(self.input_batch)[0]
start_tokens = tf.fill([batch_size], start_symbol_id)
ground_truth_as_input = tf.concat([tf.expand_dims(start_tokens, 1), self.ground_truth], 1)
# Use the embedding layer defined before to lookup embedings for ground_truth_as_input.
self.ground_truth_embedded = ######### YOUR CODE HERE #############
# Create TrainingHelper for the train stage.
train_helper = tf.contrib.seq2seq.TrainingHelper(self.ground_truth_embedded,
self.ground_truth_lengths)
# Create GreedyEmbeddingHelper for the inference stage.
# You should provide the embedding layer, start_tokens and index of the end symbol.
infer_helper = ######### YOUR CODE HERE #############
def decode(helper, scope, reuse=None):
        """Creates decoder and returns the results of the decoding with a given helper."""
with tf.variable_scope(scope, reuse=reuse):
# Create GRUCell with dropout. Do not forget to set the reuse flag properly.
decoder_cell = ######### YOUR CODE HERE #############
# Create a projection wrapper.
decoder_cell = tf.contrib.rnn.OutputProjectionWrapper(decoder_cell, vocab_size, reuse=reuse)
# Create BasicDecoder, pass the defined cell, a helper, and initial state.
# The initial state should be equal to the final state of the encoder!
decoder = ######### YOUR CODE HERE #############
# The first returning argument of dynamic_decode contains two fields:
# rnn_output (predicted logits)
# sample_id (predictions)
outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder=decoder, maximum_iterations=max_iter,
output_time_major=False, impute_finished=True)
return outputs
self.train_outputs = decode(train_helper, 'decode')
self.infer_outputs = decode(infer_helper, 'decode', reuse=True)
Seq2SeqModel.__build_decoder = classmethod(build_decoder)
def compute_loss(self):
    """Computes sequence loss (masked cross-entropy loss with logits)."""
weights = tf.cast(tf.sequence_mask(self.ground_truth_lengths), dtype=tf.float32)
self.loss = ######### YOUR CODE HERE #############
Seq2SeqModel.__compute_loss = classmethod(compute_loss)
def perform_optimization(self):
    """Specifies train_op that optimizes self.loss."""
self.train_op = ######### YOUR CODE HERE #############
Seq2SeqModel.__perform_optimization = classmethod(perform_optimization)
def init_model(self, vocab_size, embeddings_size, hidden_size,
max_iter, start_symbol_id, end_symbol_id, padding_symbol_id):
self.__declare_placeholders()
self.__create_embeddings(vocab_size, embeddings_size)
self.__build_encoder(hidden_size)
self.__build_decoder(hidden_size, vocab_size, max_iter, start_symbol_id, end_symbol_id)
# Compute loss and back-propagate.
self.__compute_loss()
self.__perform_optimization()
# Get predictions for evaluation.
self.train_predictions = self.train_outputs.sample_id
self.infer_predictions = self.infer_outputs.sample_id
Seq2SeqModel.__init__ = classmethod(init_model)
def train_on_batch(self, session, X, X_seq_len, Y, Y_seq_len, learning_rate, dropout_keep_probability):
feed_dict = {
self.input_batch: X,
self.input_batch_lengths: X_seq_len,
self.ground_truth: Y,
self.ground_truth_lengths: Y_seq_len,
self.learning_rate_ph: learning_rate,
self.dropout_ph: dropout_keep_probability
}
pred, loss, _ = session.run([
self.train_predictions,
self.loss,
self.train_op], feed_dict=feed_dict)
return pred, loss
Seq2SeqModel.train_on_batch = classmethod(train_on_batch)
def predict_for_batch(self, session, X, X_seq_len):
feed_dict = ######### YOUR CODE HERE #############
pred = session.run([
self.infer_predictions
], feed_dict=feed_dict)[0]
return pred
def predict_for_batch_with_loss(self, session, X, X_seq_len, Y, Y_seq_len):
feed_dict = ######### YOUR CODE HERE #############
pred, loss = session.run([
self.infer_predictions,
self.loss,
], feed_dict=feed_dict)
return pred, loss
Seq2SeqModel.predict_for_batch = classmethod(predict_for_batch)
Seq2SeqModel.predict_for_batch_with_loss = classmethod(predict_for_batch_with_loss)
tf.reset_default_graph()
model = ######### YOUR CODE HERE #############
batch_size = ######### YOUR CODE HERE #############
n_epochs = ######### YOUR CODE HERE #############
learning_rate = ######### YOUR CODE HERE #############
dropout_keep_probability = ######### YOUR CODE HERE #############
max_len = ######### YOUR CODE HERE #############
n_step = int(len(train_set) / batch_size)
session = tf.Session()
session.run(tf.global_variables_initializer())
invalid_number_prediction_counts = []
all_model_predictions = []
all_ground_truth = []
print('Start training... \n')
for epoch in range(n_epochs):
random.shuffle(train_set)
random.shuffle(test_set)
print('Train: epoch', epoch + 1)
for n_iter, (X_batch, Y_batch) in enumerate(generate_batches(train_set, batch_size=batch_size)):
######################################
######### YOUR CODE HERE #############
######################################
# prepare the data (X_batch and Y_batch) for training
# using function batch_to_ids
predictions, loss = ######### YOUR CODE HERE #############
if n_iter % 200 == 0:
print("Epoch: [%d/%d], step: [%d/%d], loss: %f" % (epoch + 1, n_epochs, n_iter + 1, n_step, loss))
X_sent, Y_sent = next(generate_batches(test_set, batch_size=batch_size))
######################################
######### YOUR CODE HERE #############
######################################
# prepare test data (X_sent and Y_sent) for predicting
# quality and computing value of the loss function
# using function batch_to_ids
predictions, loss = ######### YOUR CODE HERE #############
print('Test: epoch', epoch + 1, 'loss:', loss,)
for x, y, p in list(zip(X, Y, predictions))[:3]:
print('X:',''.join(ids_to_sentence(x, id2word)))
print('Y:',''.join(ids_to_sentence(y, id2word)))
print('O:',''.join(ids_to_sentence(p, id2word)))
print('')
model_predictions = []
ground_truth = []
invalid_number_prediction_count = 0
# For the whole test set calculate ground-truth values (as integer numbers)
# and prediction values (also as integers) to calculate metrics.
# If generated by model number is not correct (e.g. '1-1'),
# increase err counter and don't append this and corresponding
# ground-truth value to the arrays.
for X_batch, Y_batch in generate_batches(test_set, batch_size=batch_size):
######################################
######### YOUR CODE HERE #############
######################################
all_model_predictions.append(model_predictions)
all_ground_truth.append(ground_truth)
invalid_number_prediction_counts.append(invalid_number_prediction_count)
print('\n...training finished.')
from sklearn.metrics import mean_absolute_error
for i, (gts, predictions, invalid_number_prediction_count) in enumerate(zip(all_ground_truth,
all_model_predictions,
invalid_number_prediction_counts), 1):
mae = ######### YOUR CODE HERE #############
print("Epoch: %i, MAE: %f, Invalid numbers: %i" % (i, mae, invalid_number_prediction_count))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Populate arrays with specific numbers
Step2: You can also use np.array to create a two-dimensional matrix. To create a two-dimensional matrix, specify an extra layer of square brackets. For example, the following call creates a 3x2 matrix
Step3: To populate a matrix with all zeroes, call np.zeros. To populate a matrix with all ones, call np.ones.
Step4: Notice that np.arange generates a sequence that includes the lower bound (5) but not the upper bound (12).
Step5: Note that the highest integer generated by np.random.randint is one less than the high argument.
Step6: Mathematical Operations on NumPy Operands
Step7: The following operation also relies on broadcasting to multiply each cell in a vector by 3
Step8: Task 1
Step9: Task 2
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
one_dimensional_array = np.array([1.2, 2.4, 3.5, 4.7, 6.1, 7.2, 8.3, 9.5])
print(one_dimensional_array)
two_dimensional_array = np.array([[6, 5], [11, 7], [4, 8]])
print(two_dimensional_array)
sequence_of_integers = np.arange(5, 12)
print(sequence_of_integers)
random_integers_between_50_and_100 = np.random.randint(low=50, high=101, size=(6))
print(random_integers_between_50_and_100)
random_floats_between_0_and_1 = np.random.random([6])
print(random_floats_between_0_and_1)
random_floats_between_2_and_3 = random_floats_between_0_and_1 + 2.0
print(random_floats_between_2_and_3)
random_integers_between_150_and_300 = random_integers_between_50_and_100 * 3
print(random_integers_between_150_and_300)
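# Elementwise operations also work between two arrays of the same shape
# (a quick added illustration):
print(random_floats_between_2_and_3 * random_integers_between_50_and_100)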
feature = ? # write your code here
print(feature)
label = ? # write your code here
print(label)
#@title Double-click to see a possible solution to Task 1.
feature = np.arange(6, 21)
print(feature)
label = (feature * 3) + 4
print(label)
noise = ? # write your code here
print(noise)
label = ? # write your code here
print(label)
#@title Double-click to see a possible solution to Task 2.
noise = (np.random.random([15]) * 4) - 2
print(noise)
label = label + noise
print(label)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id='discrete'></a>
Step2: <a id='discrete_unif'></a>
Step3: Example. DiscreteUniform(a = 1, b) pmf.
Step4: <a id='bernoulli'></a>
Step5: <a id='binomial'></a>
Step6: Example. Binomial(n=10, p) pmf.
Step7: Example. Binomial(n, p=0.4) pmf.
Step8: <a id='hyper'></a>
Step9: Example. Hypergeometric(n=10, N0=10, N1) pmf.
Step10: Example. Hypergeometric(n=10, N0, N1= 10) pmf.
Step11: <a id='poisson'></a>
Step12: Example. Poisson($\lambda$) pmf.
Step13: <a id='geometric'></a>
Step14: Example. Geometric(p) pmf.
Step15: <a id='negative_binomial'></a>
Step16: Example. NegativeBinomial(r, p=0.5) pmf.
Step17: Example. NegativeBinomial(r=5, p) pmf.
Step18: <a id='pascal'></a>
Step19: Example. Pascal(r, p=0.5) pmf.
Step20: Example. Pascal(r=5, p) pmf.
|
<ASSISTANT_TASK:>
Python Code:
from symbulate import *
import matplotlib.pyplot as plt  # plt.legend / plt.title are used below
%matplotlib inline
P = BoxModel([1, 2, 5, 10], probs=[0.4, 0.2, 0.3, 0.1])
P.sim(10000).tabulate(normalize=True)
RV(P).sim(10000).plot()
sims = RV(DiscreteUniform(a=1, b=6)).sim(10000)
sims.plot()
DiscreteUniform(a=1, b=6).plot()
print('The simulated mean is: {:.2f}'.format(sims.mean()))
print('The true mean is: {:.2f}'.format(DiscreteUniform(a=1, b=6).mean()))
print('The simulated variance is: {:.2f}'.format(sims.var()))
print('The true variance is: {:.2f}'.format(DiscreteUniform(a=1, b=6).var()))
bs = [3, 5, 7]
for b in bs:
DiscreteUniform(a=1, b=b).plot()
plt.legend(["b=" + str(i) for i in bs])
plt.title("Discrete Uniform distributions with a=1")
sims = RV(Bernoulli(0.3)).sim(10000)
sims.plot()
print('The simulated mean is: {:.2f}'.format(sims.mean()))
print('The true mean is: {:.2f}'.format(Bernoulli(0.3).mean()))
print('The simulated variance is: {:.2f}'.format(sims.var()))
print('The true variance is: {:.2f}'.format(Bernoulli(0.3).var()))
sims = RV(Binomial(10, 0.4)).sim(10000)
sims.plot()
Binomial(10, 0.4).plot()
print('The simulated mean is: {:.2f}'.format(sims.mean()))
print('The true mean is: {:.2f}'.format(Binomial(10, 0.4).mean()))
print('The simulated variance is: {:.2f}'.format(sims.var()))
print('The true variance is: {:.2f}'.format(Binomial(10, 0.4).var()))
ps = [0.10, 0.50, 0.75]
for p in ps:
Binomial(10, p).plot()
plt.legend(["p=" + str(i) for i in ps])
plt.title("Binomial distributions with n=10")
ns = [5, 10, 15]
for n in ns:
Binomial(n, 0.4).plot()
plt.legend(["n=" + str(i) for i in ns])
plt.title("Binomial distributions with p=0.4")
sims = RV(Hypergeometric(n=5, N0=5, N1=10)).sim(10000)
sims.plot()
Hypergeometric(n=5, N0=5, N1=10).plot()
print('The simulated mean is: {:.2f}'.format(sims.mean()))
print('The true mean is: {:.2f}'.format(Hypergeometric(n=5, N0=5, N1=10).mean()))
print('The simulated variance is: {:.2f}'.format(sims.var()))
print('The true variance is: {:.2f}'.format(Hypergeometric(n=5, N0=5, N1=10).var()))
N1s = [5, 15, 25]
for N1 in N1s:
Hypergeometric(10, 10, N1).plot()
plt.legend(["N1=" + str(i) for i in N1s])
plt.title("Hypergeometric distributions with N0=10, n=10")
N0s = [5, 15, 25]
for N0 in N0s:
Hypergeometric(10, N0, 10).plot()
plt.legend(["N0=" + str(i) for i in N0s])
plt.title("Hypergeometric distributions with N1=10, n=10")
sims = RV(Poisson(lam = 5)).sim(10000)
sims.plot()
Poisson(lam = 5).plot()
print('The simulated mean is: {:.2f}'.format(sims.mean()))
print('The true mean is: {:.2f}'.format(Poisson(lam=5).mean()))
print('The simulated variance is: {:.2f}'.format(sims.var()))
print('The true variance is: {:.2f}'.format(Poisson(lam=5).var()))
lams = [5, 10, 15]
for lam in lams:
Poisson(lam).plot()
plt.legend(["lambda=" + str(i) for i in lams])
plt.title("Poisson distributions with parameter lambda")
sims = RV(Geometric(p=0.4)).sim(10000)
sims.plot()
Geometric(p=0.4).plot()
print('The simulated mean is: {:.2f}'.format(sims.mean()))
print('The true mean is: {:.2f}'.format(Geometric(p=0.4).mean()))
print('The simulated variance is: {:.2f}'.format(sims.var()))
print('The true variance is: {:.2f}'.format(Geometric(p=0.4).var()))
ps = [0.25, 0.50, 0.75]
for p in ps:
Geometric(p).plot()
plt.legend(["p=" + str(i) for i in ps])
plt.title("Geometric distributions with parameter p")
sims = RV(NegativeBinomial(r=3, p=0.4)).sim(10000)
sims.plot()
NegativeBinomial(r=3, p=0.4).plot()
print('The simulated mean is: {:.2f}'.format(sims.mean()))
print('The true mean is: {:.2f}'.format(NegativeBinomial(r=3, p=0.4).mean()))
print('The simulated variance is: {:.2f}'.format(sims.var()))
print('The true variance is: {:.2f}'.format(NegativeBinomial(r=3, p=0.4).var()))
rs = [5, 10, 15]
for r in rs:
NegativeBinomial(r, p=0.5).plot()
plt.legend(["r=" + str(i) for i in rs])
plt.title("Negative Binomial distributions with p=0.5")
ps = [0.25, 0.50, 0.75]
for p in ps:
NegativeBinomial(r=5, p=p).plot()
plt.legend(["p=" + str(i) for i in ps])
plt.title("Negative Binomial distributions with r=5")
sims = RV(Pascal(r=3, p=0.4)).sim(10000)
sims.plot()
Pascal(r = 3, p = 0.4).plot()
print('The simulated mean is: {:.2f}'.format(sims.mean()))
print('The true mean is: {:.2f}'.format(Pascal(r=3, p=0.4).mean()))
print('The simulated variance is: {:.2f}'.format(sims.var()))
print('The true variance is: {:.2f}'.format(Pascal(r=3, p=0.4).var()))
rs = [5, 10, 15]
for r in rs:
Pascal(r, p=0.5).plot()
plt.legend(["r=" + str(i) for i in rs])
plt.title("Pascal distributions with p=0.5")
ps = [0.25, 0.50, 0.75]
for p in ps:
Pascal(r=5, p=p).plot()
plt.legend(["p=" + str(i) for i in ps])
plt.title("Pascal distributions with r=0.5")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The file contains some major railway stations.
Step2: The projection of the file is EPSG:4326 (WGS84)
Step3: And now read the borders file, and do the projection.
Step4: Now they are projected and have the same projection.
Step5: And, first try
Step6: Try to change the marker with a circle (marker='o'), and change to red colour.
Step7: And, change to square.
Step8: Get some special icons, which come with colouringmap, to use as markers
Step9: To get a list of the icons in an icon set
Step10: To get an icon from a set by name
Step11: The above xxx_icon will be used as the marker for mapping.
Step12: map the points using the special icons
Step13: map the points according to a column of category
Step14: By default, the map_category function uses some default markers for the different categories.
Step15: Colouring the points using different colours
Step16: Now, map the points using the 'color' column.
Step17: Varying the sizes of the points according to a column containing size values
Step18: Now change the size according to the column "size2"
Step19: Actually, this can also be done by using size_scale=20. (the default is 1.).
|
<ASSISTANT_TASK:>
Python Code:
import geopandas as gpd # read and manage attribute table data
import matplotlib.pyplot as plt # prepare the figure
import colouringmap.mapping_point as mpoint # for drawing points
import colouringmap.mapping_polygon as mpoly # for mapping background polygon
import colouringmap.markerset as ms # getting more marker icons
from random import random # just for creating a random colour for demonstration
# the projection of the map, the data is in wgs84(epsg:4326), so need a proj dict for conversion
proj = {u'lon_0': 138, u'ellps': u'WGS84', u'y_0': 0, u'no_defs': True, u'proj': u'eqdc', u'x_0': 0, u'units': u'm', u'lat_2': 40, u'lat_1': 34, u'lat_0': 0}
## magic line for matplotlib
%matplotlib inline
stations = gpd.read_file('data/tweets_hotspot_station.shp')
stations.head()
stations.crs
stations = stations.to_crs(proj)
borders = gpd.read_file('data/tokyo_special_ward.shp')
borders.head()
borders = borders.to_crs(proj) # convert the borders projection to the same as the stations
print(borders.crs == stations.crs)  # check again that the two shapefiles have the same projection
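# Optional check (an added line): after reprojection, coordinates are in metres
# (the proj dict above uses units='m'), so the bounds should be large numbers.
print(stations.total_bounds)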
fig,ax = plt.subplots(figsize=(7,7))
ax = mpoint.prepare_map(ax, map_context=borders, background_colour='grey')
ax = mpoly.map_shape(borders, ax, lw=.1, alpha=.7, fc='#c1c6fc')
fig,ax = plt.subplots(figsize=(7,7))
ax = mpoint.prepare_map(ax, map_context=borders, background_colour='grey')
ax = mpoly.map_shape(borders, ax, lw=.1, alpha=.7, fc='#c1c6fc')
ax = mpoint.map_scatter(stations, ax, extend_context=False)
fig,ax = plt.subplots(figsize=(7,7))
ax = mpoint.prepare_map(ax, map_context=borders, background_colour='grey')
ax = mpoly.map_shape(borders, ax, lw=.1, alpha=.7, fc='#c1c6fc')
ax = mpoint.map_scatter(stations, ax, extend_context=False,
marker='o', size=36, facecolor='red', alpha=.7)
fig,ax = plt.subplots(figsize=(7,7))
ax = mpoint.prepare_map(ax, map_context=borders, background_colour='grey')
ax = mpoly.map_shape(borders, ax, lw=.1, alpha=.7, fc='#c1c6fc')
ax = mpoint.map_scatter(stations, ax, extend_context=False,
marker='s', size=36, facecolor='red', alpha=.7)
print ms.list_icon_sets()
print ms.list_icon_names('maki')
print ms.list_icon_names('linecons')
rail_icon = ms.get_marker('maki', 'rail')
shop_icon = ms.get_marker('linecons', 'shop')
ms.show_icon(shop_icon, size=36, face_colour='green')
ms.show_icon(rail_icon, size=48)
fig,ax = plt.subplots(figsize=(12,12))
ax = mpoint.prepare_map(ax, map_context=borders, background_colour='grey')
ax = mpoly.map_shape(borders, ax, lw=.1, alpha=.7, fc='#c1c6fc')
ax = mpoint.map_scatter(stations, ax, extend_context=False,
marker=shop_icon, size=12, facecolor='red', alpha=.9)
fig,ax = plt.subplots(figsize=(12,12))
ax = mpoint.prepare_map(ax, map_context=borders, background_colour='grey')
ax = mpoly.map_shape(borders, ax, lw=.1, alpha=.7, fc='#c1c6fc')
ax = mpoint.map_scatter(stations, ax, extend_context=False,
marker=rail_icon, size=24, facecolor='#4b0101', alpha=.9)
fig,ax = plt.subplots(figsize=(12,12))
ax = mpoint.prepare_map(ax, map_context=borders, background_colour='grey')
ax = mpoly.map_shape(borders, ax, lw=.1, alpha=.7, fc='#c1c6fc')
ax = mpoint.map_category(stations,'Company', ax, size=48, extend_context=False)
fig,ax = plt.subplots(figsize=(12,12))
ax = mpoint.prepare_map(ax, map_context=borders, background_colour='grey')
ax = mpoly.map_shape(borders, ax, lw=.1, alpha=.7, fc='#c1c6fc')
ax = mpoint.map_category(stations,'Company', ax, size=28, extend_context=False,
cat_order=['Tokyo Metro', 'Toei'], # category order
marker_order=[shop_icon, rail_icon],
size_order=[24,30],
colour_order=['r', 'g'])
col_list = []
for i in range(len(stations)):
r = random()
g = random()
b = random()
col_list.append((r,g,b))
stations['color'] = col_list
fig,ax = plt.subplots(figsize=(12,12))
ax = mpoint.prepare_map(ax, map_context=borders, background_colour='grey')
ax = mpoly.map_shape(borders, ax, lw=.1, alpha=.7, fc='#c1c6fc')
ax = mpoint.map_colour(stations, 'color', ax, extend_context=False,
marker=rail_icon, size=24,alpha=.9)
stemp = stations['DistanceBe'].tolist()
stemp2 = [ float(s)*20 for s in stemp ]
stations['size2'] = stemp2
stations['DistanceBe2'] = [ float(s) for s in stemp ]
stations.head()
fig,ax = plt.subplots(figsize=(12,12))
ax = mpoint.prepare_map(ax, map_context=borders, background_colour='grey')
ax = mpoly.map_shape(borders, ax, lw=.1, alpha=.7, fc='#c1c6fc')
ax = mpoint.map_size(stations, 'size2', ax, extend_context=False,
marker=rail_icon, facecolor='green', alpha=.9)
fig,ax = plt.subplots(figsize=(12,12))
ax = mpoint.prepare_map(ax, map_context=borders, background_colour='grey')
ax = mpoly.map_shape(borders, ax, lw=.1, alpha=.7, fc='#c1c6fc')
ax = mpoint.map_size(stations, 'DistanceBe2', ax, extend_context=False, size_scale=20.,
marker=rail_icon, facecolor='green', alpha=.9)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Transforming Column Names
Step2: Transforming Open and Close Dates
Step3: Adding new features
Step4: Transforming Localization
Step5: Localization Grid (in Km)
Step6: Neighborhood
Step7: To avoid the dummy-trap (and curse of dimensionality). We should drop one of the columns when applying one-hot-encoding depending on the classification method to be used. However, since we are not using a LogisticRegression-like classifier, we'll add all features.
Step8: Category
Step9: Request Type
Step10: Source
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import bokeh
from bokeh.io import output_notebook
output_notebook()
import os
DATA_STREETLIGHT_CASES_URL = 'https://data.sfgov.org/api/views/c53t-rr3f/rows.json?accessType=DOWNLOAD'
DATA_STREETLIGHT_CASES_LOCAL = 'DATA_STREETLIGHT_CASES.json'
data_path = DATA_STREETLIGHT_CASES_URL
if os.path.isfile(DATA_STREETLIGHT_CASES_LOCAL):
data_path = DATA_STREETLIGHT_CASES_LOCAL
import urllib, json
def _load_data(url):
response = urllib.urlopen(url)
raw_data = json.loads(response.read())
columns = [col['name'] for col in raw_data['meta']['view']['columns']]
rows = raw_data['data']
return pd.DataFrame(data=rows, columns=columns)
df = _load_data(data_path)
df.columns = [col.lower().replace(' ', '_') for col in df.columns]
df.columns
df['opened'] = pd.to_datetime(df.opened)
df['opened_dayofweek'] = df.opened.dt.dayofweek
df['opened_month'] = df.opened.dt.month
df['opened_year'] = df.opened.dt.year
df['opened_dayofmonth'] = df.opened.dt.day
df['closed'] = pd.to_datetime(df.closed)
df['closed_dayofweek'] = df.closed.dt.dayofweek
df['closed_month'] = df.closed.dt.month
df['closed_year'] = df.closed.dt.year
df['closed_dayofmonth'] = df.closed.dt.day
df['delta'] = (df.closed - df.opened).dt.days
df['is_open'] = pd.isnull(df.closed)
df['opened_weekend'] = df.opened_dayofweek >= 5
df['closed_weekend'] = df.closed_dayofweek >= 5
df['target'] = df.delta <= 2
from geopy.distance import vincenty
df['latitude'] = df.point.apply(lambda e: float(e[1]))
df['longitude'] = df.point.apply(lambda e: float(e[2]))
min_lat, max_lat = min(df.latitude), max(df.latitude)
min_lng, max_lng = min(df.longitude), max(df.longitude)
def grid(lat, lng):
x = vincenty((lat, min_lng), (lat, lng)).miles
y = vincenty((min_lat, lng), (lat, lng)).miles
return x, y
xy = [grid(lat, lng) for lat, lng in zip(df.latitude.values, df.longitude.values)]
df['loc_x'] = np.array(xy)[:,0]
df['loc_y'] = np.array(xy)[:,1]
df.neighborhood.unique()
dummies = pd.get_dummies(df.neighborhood.str.replace(' ', '_').str.lower(), prefix='neigh_', drop_first=False)
dummies.head()
#del df['neighborhood']
df[dummies.columns] = dummies
df.category.unique()
dummies = pd.get_dummies(df.category.str.replace(' ', '_').str.lower(), prefix='cat_', drop_first=False)
dummies.head()
#del df['category']
df[dummies.columns] = dummies
df.request_type.unique()
tmp = df[['request_type', 'category', 'delta', 'target']]
tmp = tmp.dropna()
vc = tmp.request_type.value_counts()
tmp.loc[vc[tmp.request_type].values < 50, 'request_type'] = 'Others'
pivot = tmp.pivot_table(index='request_type', columns='category', values='target',
aggfunc=sum, fill_value=0)
plt.figure(figsize=(10,6))
sns.heatmap(pivot.astype(int), annot=True, fmt="d", linewidths=.5)
dummies = pd.get_dummies(df.source.str.replace(' ', '_').str.lower(), prefix='source_', drop_first=False)
df[dummies.columns] = dummies
df['status'] = df.status == 'Closed'
original_columns = [u'sid', u'id', u'position', u'created_at', u'created_meta',
u'updated_at', u'updated_meta', u'meta', u'caseid', u'opened',
u'closed', u'status', u'responsible_agency', u'address', u'category',
u'request_type', u'request_details', u'source', u'supervisor_district',
u'neighborhood', u'updated', u'point']
del df['sid']
del df['id']
del df['position']
del df['created_at']
del df['created_meta']
del df['updated_at']
del df['meta']
del df['caseid']
del df['address']
del df['responsible_agency']
del df['request_details']
del df['request_type']
del df['status']
del df['updated']
del df['supervisor_district']
del df['point']
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: NLTK tools
Step2: Config
Step3: Identifier for storing these features on disk and referring to them later.
Step4: Read data
Step5: NLTK built-in stopwords.
Step6: Build features
Step7: Save features
|
<ASSISTANT_TASK:>
Python Code:
from pygoose import *
import nltk
from collections import Counter
from nltk.corpus import stopwords
nltk.download('stopwords')
project = kg.Project.discover()
feature_list_id = '3rdparty_dasolmar_whq'
df_train = pd.read_csv(project.data_dir + 'train.csv').fillna('')
df_test = pd.read_csv(project.data_dir + 'test.csv').fillna('')
stops = set(stopwords.words("english"))
# If a word appears only once, we ignore it completely (likely a typo)
# Epsilon defines a smoothing constant, which makes the effect of extremely rare words smaller
def get_weight(count, eps=10000, min_count=2):
return 0 if count < min_count else 1 / (count + eps)
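# Quick illustration (an added check): words seen only once get weight 0 and
# weights shrink smoothly as counts grow.
print(get_weight(1), get_weight(2), get_weight(1000))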
def add_word_count(x, df, word):
x['das_q1_' + word] = df['question1'].apply(lambda x: (word in str(x).lower())*1)
x['das_q2_' + word] = df['question2'].apply(lambda x: (word in str(x).lower())*1)
x['das_' + word + '_both'] = x['das_q1_' + word] * x['das_q2_' + word]
train_qs = pd.Series(df_train['question1'].tolist() + df_train['question2'].tolist()).astype(str)
words = (" ".join(train_qs)).lower().split()
counts = Counter(words)
weights = {word: get_weight(count) for word, count in counts.items()}
def word_shares(row):
q1_list = str(row['question1']).lower().split()
q1 = set(q1_list)
q1words = q1.difference(stops)
if len(q1words) == 0:
return '0:0:0:0:0:0:0:0'
q2_list = str(row['question2']).lower().split()
q2 = set(q2_list)
q2words = q2.difference(stops)
if len(q2words) == 0:
return '0:0:0:0:0:0:0:0'
words_hamming = sum(1 for i in zip(q1_list, q2_list) if i[0]==i[1])/max(len(q1_list), len(q2_list))
q1stops = q1.intersection(stops)
q2stops = q2.intersection(stops)
q1_2gram = set([i for i in zip(q1_list, q1_list[1:])])
q2_2gram = set([i for i in zip(q2_list, q2_list[1:])])
shared_2gram = q1_2gram.intersection(q2_2gram)
shared_words = q1words.intersection(q2words)
shared_weights = [weights.get(w, 0) for w in shared_words]
q1_weights = [weights.get(w, 0) for w in q1words]
q2_weights = [weights.get(w, 0) for w in q2words]
    total_weights = q1_weights + q2_weights  # combine weights from both questions (the source had q1_weights twice, an apparent typo)
R1 = np.sum(shared_weights) / np.sum(total_weights) #tfidf share
R2 = len(shared_words) / (len(q1words) + len(q2words) - len(shared_words)) #count share
R31 = len(q1stops) / len(q1words) #stops in q1
R32 = len(q2stops) / len(q2words) #stops in q2
Rcosine_denominator = (np.sqrt(np.dot(q1_weights,q1_weights))*np.sqrt(np.dot(q2_weights,q2_weights)))
Rcosine = np.dot(shared_weights, shared_weights)/Rcosine_denominator
if len(q1_2gram) + len(q2_2gram) == 0:
R2gram = 0
else:
R2gram = len(shared_2gram) / (len(q1_2gram) + len(q2_2gram))
return '{}:{}:{}:{}:{}:{}:{}:{}'.format(R1, R2, len(shared_words), R31, R32, R2gram, Rcosine, words_hamming)
df = pd.concat([df_train, df_test])
df['word_shares'] = df.apply(word_shares, axis=1, raw=True)
x = pd.DataFrame()
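# Note: field 0 of word_shares holds the tfidf-weighted share (R1) and field 1
# the plain count share (R2); the column names below follow the original kernel.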
x['das_word_match'] = df['word_shares'].apply(lambda x: float(x.split(':')[0]))
x['das_word_match_2root'] = np.sqrt(x['das_word_match'])
x['das_tfidf_word_match'] = df['word_shares'].apply(lambda x: float(x.split(':')[1]))
x['das_shared_count'] = df['word_shares'].apply(lambda x: float(x.split(':')[2]))
x['das_stops1_ratio'] = df['word_shares'].apply(lambda x: float(x.split(':')[3]))
x['das_stops2_ratio'] = df['word_shares'].apply(lambda x: float(x.split(':')[4]))
x['das_shared_2gram'] = df['word_shares'].apply(lambda x: float(x.split(':')[5]))
x['das_cosine'] = df['word_shares'].apply(lambda x: float(x.split(':')[6]))
x['das_words_hamming'] = df['word_shares'].apply(lambda x: float(x.split(':')[7]))
x['das_diff_stops_r'] = np.abs(x['das_stops1_ratio'] - x['das_stops2_ratio'])
x['das_len_q1'] = df['question1'].apply(lambda x: len(str(x)))
x['das_len_q2'] = df['question2'].apply(lambda x: len(str(x)))
x['das_diff_len'] = np.abs(x['das_len_q1'] - x['das_len_q2'])
x['das_caps_count_q1'] = df['question1'].apply(lambda x:sum(1 for i in str(x) if i.isupper()))
x['das_caps_count_q2'] = df['question2'].apply(lambda x:sum(1 for i in str(x) if i.isupper()))
x['das_diff_caps'] = np.abs(x['das_caps_count_q1'] - x['das_caps_count_q2'])
x['das_len_char_q1'] = df['question1'].apply(lambda x: len(str(x).replace(' ', '')))
x['das_len_char_q2'] = df['question2'].apply(lambda x: len(str(x).replace(' ', '')))
x['das_diff_len_char'] = np.abs(x['das_len_char_q1'] - x['das_len_char_q2'])
x['das_len_word_q1'] = df['question1'].apply(lambda x: len(str(x).split()))
x['das_len_word_q2'] = df['question2'].apply(lambda x: len(str(x).split()))
x['das_diff_len_word'] = np.abs(x['das_len_word_q1'] - x['das_len_word_q2'])
x['das_avg_word_len1'] = x['das_len_char_q1'] / x['das_len_word_q1']
x['das_avg_word_len2'] = x['das_len_char_q2'] / x['das_len_word_q2']
x['das_diff_avg_word'] = np.abs(x['das_avg_word_len1'] - x['das_avg_word_len2'])
# x['exactly_same'] = (df['question1'] == df['question2']).astype(int)
# x['duplicated'] = df.duplicated(['question1','question2']).astype(int)
whq_words = ['how', 'what', 'which', 'who', 'where', 'when', 'why']
for whq in whq_words:
add_word_count(x, df, whq)
whq_columns_q1 = ['das_q1_' + whq for whq in whq_words]
whq_columns_q2 = ['das_q2_' + whq for whq in whq_words]
x['whq_count_q1'] = x[whq_columns_q1].sum(axis=1)
x['whq_count_q2'] = x[whq_columns_q2].sum(axis=1)
x['whq_count_diff'] = np.abs(x['whq_count_q1'] - x['whq_count_q2'])
feature_names = list(x.columns.values)
print("Features: {}".format(feature_names))
X_train = x[:df_train.shape[0]].values
X_test = x[df_train.shape[0]:].values
project.save_features(X_train, X_test, feature_names, feature_list_id)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Poincaré's bread
Step2: Here's a sample from the distribution of bread weight if the baker chooses 5 loaves.
Step3: And here's how the observed mean depends on n.
Step4: Looks like n=4 yields a measured mean close to 1000 g.
Step5: Now let's compare the distribution of the sample to the normal distribution with the same mean and std.
Step6: Visually they are almost indistinguishable, which makes it seem unlikely that Poincaré could really tell the difference, especially with only 365 observations.
Step7: As a test statistic, we'll use the sample skew.
Step8: The null hypothesis is that the data come from a normal distribution with the observed mean and standard deviation.
Step9: Now we can generate fake data under the null hypothesis and compute the test statistic.
Step10: And we can estimate the distribution of the test statistic under the null hypothesis
Step11: Here's the 95th percentile of the test statistic under the null hypothesis.
Step12: If Henri observes a skew above this threshold, he would conclude that it is statistically significant.
Step13: Here's what the sampling distribution looks like for the skew Poincaré observes.
Step14: If Poincaré runs this experiment for a year, what is the chance he gets a statistically significant result?
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='white')
from thinkstats2 import Pmf, Cdf
import thinkstats2
import thinkplot
decorate = thinkplot.config
def choose_bread(mean, std, n):
loaves = np.random.normal(mean, std, size=n)
return max(loaves)
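# Model of the cheating baker: bake n loaves from N(mean, std) and hand
# Poincaré the heaviest one, so delivered weights are maxima, not raw draws.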
mean = 950
std = 50
n = 5
choose_bread(mean, std, n)
sample = [choose_bread(mean, std, n) for i in range(10000)]
np.mean(sample)
for n in range(1, 10):
sample = [choose_bread(mean, std, n) for i in range(10000)]
print(n, np.mean(sample))
n = 4
sample = [choose_bread(mean, std, n) for i in range(10000)]
mu = np.mean(sample)
sigma = np.std(sample)
mu, sigma
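# Rough analytic check (assuming iid normal loaves): the expected maximum of
# four standard normals is about 1.03, so mu should land near
# 950 + 50 * 1.03 ≈ 1001 g, consistent with the simulated value above.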
from scipy.stats import norm
xs = sample.copy()
xs.sort()
ys = norm.cdf(xs, mu, sigma)
plt.plot(xs, ys, color='gray', label='Gaussian model')
cdf = Cdf(sample, label='Sample with n=4')
thinkplot.Plot(cdf)
decorate(xlabel='Weight in g', ylabel='CDF', title='Distribution of bread weight')
def simulate_year(mean, std, n, days):
sample = [choose_bread(mean, std, n) for i in range(days)]
return sample
from scipy.stats import skew
def test_stat(sample):
return skew(sample)
def run_model(mu, sigma, days):
return np.random.normal(mu, sigma, size=days)
test_stat(run_model(mu, sigma, 365))
skew_under_null = [test_stat(run_model(mu, sigma, 365)) for i in range(1000)]
cdf = Cdf(skew_under_null)
thinkplot.Cdf(cdf)
decorate(xlabel='Skew', ylabel='CDF', title='Distribution of skew under H0')
thresh = np.percentile(skew_under_null, 95)
thresh
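# One-sided test at the 5% level: under H0, only 5% of years would show a
# skew above this threshold by chance.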
n = 4
skew_poincare = [test_stat(simulate_year(mean, std, n, 365)) for i in range(1000)]
cdf = Cdf(skew_poincare)
thinkplot.Cdf(cdf)
decorate(xlabel='Skew', ylabel='CDF', title='Distribution of skew if the baker cheats n=4')
np.mean(np.array(skew_poincare) > thresh)
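# This fraction is the power of the test: the probability that a year of
# daily weighings yields a statistically significant skew when the baker cheats.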
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create the units
Step2: Connect the units
Step3: Append the nodes and then sort the graph
Step4: Run
|
<ASSISTANT_TASK:>
Python Code:
import pyAudioGraph as ag
# choose a wav file on your pc
wav_file = 'AudioFile.wav' # only signed 16/32 bit supported
world = ag.World(nchannels=2, buf_len=512)
# create the diskInUnit
audioStream = ag.AudioStreamWaveFile(wav_file)
diskInNode = ag.Nodes.DiskInNode(world, audioStream)
# create the output Unit
outNode = ag.Nodes.OutNode(world)
for i in range(diskInNode.nchannels):
diskInNode.w_out[i].plug_into(outNode.w_in[i])
world.append(outNode)
world.sort()
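# Assumption: sort() orders the unit graph so upstream units (the disk reader)
# are evaluated before downstream ones (the output) on each audio callback.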
import time
world.start()
time.sleep(20)
world.stop()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: version 1.0.1
Step4: (1b) Configuration and Initial RDD Creation
Step5: (1c) Data Cleaning
Step6: Part 2
Step7: (2b) Example
Step9: (2c) Example
Step10: (2d) Example
Step11: (2e) Example
Step12: (2f) Example
Step13: Part 3
Step14: (3b) Exercise
Step15: (3c) Exercise
Step16: (3d) Exercise
Step17: (3e) Exercise
Step18: (3f) Exercise
Step19: Part 4
Step20: (4b) Exercise
Step21: (4c) Exercise
Step22: (4d) Exercise
Step23: (4e) Exercise
Step24: (4f) Exercise
Step25: (4g) Exercise
Step26: (4h) Exercise
Step27: (4i) Exercise
|
<ASSISTANT_TASK:>
Python Code:
import re
import datetime
from pyspark.sql import Row
month_map = {'Jan': 1, 'Feb': 2, 'Mar':3, 'Apr':4, 'May':5, 'Jun':6, 'Jul':7,
'Aug':8, 'Sep': 9, 'Oct':10, 'Nov': 11, 'Dec': 12}
def parse_apache_time(s):
"""
Convert Apache time format into a Python datetime object
Args:
    s (str): date and time in Apache time format
Returns:
    datetime: datetime object (ignore timezone for now)
"""
return datetime.datetime(int(s[7:11]),
month_map[s[3:6]],
int(s[0:2]),
int(s[12:14]),
int(s[15:17]),
int(s[18:20]))
def parseApacheLogLine(logline):
"""
Parse a line in the Apache Common Log format
Args:
    logline (str): a line of text in the Apache Common Log format
Returns:
    tuple: either a dictionary containing the parts of the Apache Access Log and 1,
           or the original invalid log line and 0
"""
match = re.search(APACHE_ACCESS_LOG_PATTERN, logline)
if match is None:
return (logline, 0)
size_field = match.group(9)
if size_field == '-':
size = long(0)
else:
size = long(match.group(9))
return (Row(
host = match.group(1),
client_identd = match.group(2),
user_id = match.group(3),
date_time = parse_apache_time(match.group(4)),
method = match.group(5),
endpoint = match.group(6),
protocol = match.group(7),
response_code = int(match.group(8)),
content_size = size
), 1)
# A regular expression pattern to extract fields from the log line
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+) \[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S+)\s*(\S*)" (\d{3}) (\S+)'
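# Capture groups: 1=host, 2=client identd, 3=user id, 4=date/time,
# 5=HTTP method, 6=endpoint, 7=protocol, 8=response code, 9=content size.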
import sys
import os
from test_helper import Test
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab2', 'apache.access.log.PROJECT')
logFile = os.path.join(baseDir, inputPath)
def parseLogs():
"""Read and parse log file"""
parsed_logs = (sc
.textFile(logFile)
.map(parseApacheLogLine)
.cache())
access_logs = (parsed_logs
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
failed_logs = (parsed_logs
.filter(lambda s: s[1] == 0)
.map(lambda s: s[0]))
failed_logs_count = failed_logs.count()
if failed_logs_count > 0:
print 'Number of invalid logline: %d' % failed_logs.count()
for line in failed_logs.take(20):
print 'Invalid logline: %s' % line
print 'Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (parsed_logs.count(), access_logs.count(), failed_logs.count())
return parsed_logs, access_logs, failed_logs
parsed_logs, access_logs, failed_logs = parseLogs()
# TODO: Replace <FILL IN> with appropriate code
# This was originally '^(\S+) (\S+) (\S+) \[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S+)\s*(\S*)" (\d{3}) (\S+)'
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+) \[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S+)\s*(\S*)\s*" (\d{3}) (\S+)'
parsed_logs, access_logs, failed_logs = parseLogs()
# TEST Data cleaning (1c)
Test.assertEquals(failed_logs.count(), 0, 'incorrect failed_logs.count()')
Test.assertEquals(parsed_logs.count(), 1043177 , 'incorrect parsed_logs.count()')
Test.assertEquals(access_logs.count(), parsed_logs.count(), 'incorrect access_logs.count()')
# Calculate statistics based on the content size.
content_sizes = access_logs.map(lambda log: log.content_size).cache()
print 'Content Size Avg: %i, Min: %i, Max: %s' % (
content_sizes.reduce(lambda a, b : a + b) / content_sizes.count(),
content_sizes.min(),
content_sizes.max())
# Response Code to Count
responseCodeToCount = (access_logs
.map(lambda log: (log.response_code, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
responseCodeToCountList = responseCodeToCount.take(100)
print 'Found %d response codes' % len(responseCodeToCountList)
print 'Response Code Counts: %s' % responseCodeToCountList
assert len(responseCodeToCountList) == 7
assert sorted(responseCodeToCountList) == [(200, 940847), (302, 16244), (304, 79824), (403, 58), (404, 6185), (500, 2), (501, 17)]
labels = responseCodeToCount.map(lambda (x, y): x).collect()
print labels
count = access_logs.count()
fracs = responseCodeToCount.map(lambda (x, y): (float(y) / count)).collect()
print fracs
import matplotlib.pyplot as plt
def pie_pct_format(value):
"""
Determine the appropriate format string for the pie chart percentage label
Args:
    value: value of the pie slice
Returns:
    str: formatted string label; if the slice is too small to fit, returns an empty string for label
"""
return '' if value < 7 else '%.0f%%' % value
fig = plt.figure(figsize=(4.5, 4.5), facecolor='white', edgecolor='white')
colors = ['yellowgreen', 'lightskyblue', 'gold', 'purple', 'lightcoral', 'yellow', 'black']
explode = (0.05, 0.05, 0.1, 0, 0, 0, 0)
patches, texts, autotexts = plt.pie(fracs, labels=labels, colors=colors,
explode=explode, autopct=pie_pct_format,
shadow=False, startangle=125)
for text, autotext in zip(texts, autotexts):
if autotext.get_text() == '':
text.set_text('') # If the slice is too small to fit, don't show a text label
plt.legend(labels, loc=(0.80, -0.1), shadow=True)
pass
# Any hosts that has accessed the server more than 10 times.
hostCountPairTuple = access_logs.map(lambda log: (log.host, 1))
hostSum = hostCountPairTuple.reduceByKey(lambda a, b : a + b)
hostMoreThan10 = hostSum.filter(lambda s: s[1] > 10)
hostsPick20 = (hostMoreThan10
.map(lambda s: s[0])
.take(20))
print 'Any 20 hosts that have accessed more than 10 times: %s' % hostsPick20
# An example: [u'204.120.34.185', u'204.243.249.9', u'slip1-32.acs.ohio-state.edu', u'lapdog-14.baylor.edu', u'199.77.67.3', u'gs1.cs.ttu.edu', u'haskell.limbex.com', u'alfred.uib.no', u'146.129.66.31', u'manaus.bologna.maraut.it', u'dialup98-110.swipnet.se', u'slip-ppp02.feldspar.com', u'ad03-053.compuserve.com', u'srawlin.opsys.nwa.com', u'199.202.200.52', u'ix-den7-23.ix.netcom.com', u'151.99.247.114', u'w20-575-104.mit.edu', u'205.25.227.20', u'ns.rmc.com']
endpoints = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
ends = endpoints.map(lambda (x, y): x).collect()
counts = endpoints.map(lambda (x, y): y).collect()
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, len(ends), 0, max(counts)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Endpoints')
plt.ylabel('Number of Hits')
plt.plot(counts)
pass
# Top Endpoints
endpointCounts = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b))
topEndpoints = endpointCounts.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten Endpoints: %s' % topEndpoints
assert topEndpoints == [(u'/images/NASA-logosmall.gif', 59737), (u'/images/KSC-logosmall.gif', 50452), (u'/images/MOSAIC-logosmall.gif', 43890), (u'/images/USA-logosmall.gif', 43664), (u'/images/WORLD-logosmall.gif', 43277), (u'/images/ksclogo-medium.gif', 41336), (u'/ksc.html', 28582), (u'/history/apollo/images/apollo-logo1.gif', 26778), (u'/images/launch-logo.gif', 24755), (u'/', 20292)], 'incorrect Top Ten Endpoints'
# TODO: Replace <FILL IN> with appropriate code
# HINT: Each of these <FILL IN> below could be completed with a single transformation or action.
# You are welcome to structure your solution in a different way, so long as
# you ensure the variables used in the next Test section are defined (ie. endpointSum, topTenErrURLs).
not200 = access_logs.filter(lambda log: log.response_code != 200)
endpointCountPairTuple = not200.map(lambda log: (log.endpoint, 1))
endpointSum = endpointCountPairTuple.reduceByKey(lambda a, b : a + b)
topTenErrURLs = endpointSum.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten failed URLs: %s' % topTenErrURLs
# TEST Top ten error endpoints (3a)
Test.assertEquals(endpointSum.count(), 7689, 'incorrect count for endpointSum')
Test.assertEquals(topTenErrURLs, [(u'/images/NASA-logosmall.gif', 8761), (u'/images/KSC-logosmall.gif', 7236), (u'/images/MOSAIC-logosmall.gif', 5197), (u'/images/USA-logosmall.gif', 5157), (u'/images/WORLD-logosmall.gif', 5020), (u'/images/ksclogo-medium.gif', 4728), (u'/history/apollo/images/apollo-logo1.gif', 2907), (u'/images/launch-logo.gif', 2811), (u'/', 2199), (u'/images/ksclogosmall.gif', 1622)], 'incorrect Top Ten failed URLs (topTenErrURLs)')
# TODO: Replace <FILL IN> with appropriate code
# HINT: Do you recall the tips from (3a)? Each of these <FILL IN> could be an transformation or action.
hosts = access_logs.map(lambda log: log.host)
uniqueHosts = hosts.distinct()
uniqueHostCount = uniqueHosts.count()
print 'Unique hosts: %d' % uniqueHostCount
# TEST Number of unique hosts (3b)
Test.assertEquals(uniqueHostCount, 54507, 'incorrect uniqueHostCount')
# TODO: Replace <FILL IN> with appropriate code
dayToHostPairTuple = access_logs.map(lambda log: (log.date_time.day, log.host)).distinct()
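# distinct() keeps one (day, host) pair per host per day, so the group sizes
# below count unique daily hosts rather than raw requests.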
dayGroupedHosts = dayToHostPairTuple.groupByKey()
dayHostCount = dayGroupedHosts.map(lambda (day, hosts): (day, len(hosts)))
dailyHosts = (dayHostCount
.sortByKey()
.cache())
dailyHostsList = dailyHosts.take(30)
print 'Unique hosts per day: %s' % dailyHostsList
# TEST Number of unique daily hosts (3c)
Test.assertEquals(dailyHosts.count(), 21, 'incorrect dailyHosts.count()')
Test.assertEquals(dailyHostsList, [(1, 2582), (3, 3222), (4, 4190), (5, 2502), (6, 2537), (7, 4106), (8, 4406), (9, 4317), (10, 4523), (11, 4346), (12, 2864), (13, 2650), (14, 4454), (15, 4214), (16, 4340), (17, 4385), (18, 4168), (19, 2550), (20, 2560), (21, 4134), (22, 4456)], 'incorrect dailyHostsList')
Test.assertTrue(dailyHosts.is_cached, 'incorrect dailyHosts.is_cached')
# TODO: Replace <FILL IN> with appropriate code
daysWithHosts = dailyHosts.map(lambda log: log[0]).collect()
hosts = dailyHosts.map(lambda log: log[1]).collect()
# TEST Visualizing unique daily hosts (3d)
test_days = range(1, 23)
test_days.remove(2)
Test.assertEquals(daysWithHosts, test_days, 'incorrect days')
Test.assertEquals(hosts, [2582, 3222, 4190, 2502, 2537, 4106, 4406, 4317, 4523, 4346, 2864, 2650, 4454, 4214, 4340, 4385, 4168, 2550, 2560, 4134, 4456], 'incorrect hosts')
fig = plt.figure(figsize=(8,4.5), facecolor='white', edgecolor='white')
plt.axis([min(daysWithHosts), max(daysWithHosts), 0, max(hosts)+500])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Hosts')
plt.plot(daysWithHosts, hosts)
pass
# TODO: Replace <FILL IN> with appropriate code
dayAndHostTuple = access_logs.map(lambda log: (log.date_time.day, log.host))
groupedByDay = dayAndHostTuple.groupByKey()
sortedByDay = groupedByDay.sortByKey()
avgDailyReqPerHost = (sortedByDay
.map(lambda(day, requests): (day, len(requests)))
.join(dailyHosts)
.map(lambda(day, (totalRequests, numOfHosts)): (day, totalRequests/numOfHosts))
.sortByKey()
.cache())
avgDailyReqPerHostList = avgDailyReqPerHost.take(30)
print 'Average number of daily requests per Hosts is %s' % avgDailyReqPerHostList
# TEST Average number of daily requests per hosts (3e)
Test.assertEquals(avgDailyReqPerHostList, [(1, 13), (3, 12), (4, 14), (5, 12), (6, 12), (7, 13), (8, 13), (9, 14), (10, 13), (11, 14), (12, 13), (13, 13), (14, 13), (15, 13), (16, 13), (17, 13), (18, 13), (19, 12), (20, 12), (21, 13), (22, 12)], 'incorrect avgDailyReqPerHostList')
Test.assertTrue(avgDailyReqPerHost.is_cached, 'incorrect avgDailyReqPerHost.is_cache')
# TODO: Replace <FILL IN> with appropriate code
daysWithAvg = avgDailyReqPerHost.map(lambda (day, avg): day).collect()
avgs = avgDailyReqPerHost.map(lambda (day, avg): avg).collect()
# TEST Average Daily Requests per Unique Host (3f)
Test.assertEquals(daysWithAvg, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect days')
Test.assertEquals(avgs, [13, 12, 14, 12, 12, 13, 13, 14, 13, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 13, 12], 'incorrect avgs')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithAvg), 0, max(avgs)+2])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Average')
plt.plot(daysWithAvg, avgs)
pass
# TODO: Replace <FILL IN> with appropriate code
badRecords = (access_logs
.filter(lambda log: log.response_code == 404)
.cache())
print 'Found %d 404 URLs' % badRecords.count()
# TEST Counting 404 (4a)
Test.assertEquals(badRecords.count(), 6185, 'incorrect badRecords.count()')
Test.assertTrue(badRecords.is_cached, 'incorrect badRecords.is_cached')
# TODO: Replace <FILL IN> with appropriate code
badEndpoints = badRecords.map(lambda log: log.endpoint)
badUniqueEndpoints = badEndpoints.distinct()
badUniqueEndpointsPick40 = badUniqueEndpoints.take(40)
print '404 URLS: %s' % badUniqueEndpointsPick40
# TEST Listing 404 records (4b)
badUniqueEndpointsSet40 = set(badUniqueEndpointsPick40)
Test.assertEquals(len(badUniqueEndpointsSet40), 40, 'badUniqueEndpointsPick40 not distinct')
# TODO: Replace <FILL IN> with appropriate code
badEndpointsCountPairTuple = badRecords.map(lambda log: (log.endpoint, 1))
badEndpointsSum = badEndpointsCountPairTuple.reduceByKey(lambda a, b: a + b)
badEndpointsTop20 = badEndpointsSum.takeOrdered(20, lambda a: -a[1])
print 'Top Twenty 404 URLs: %s' % badEndpointsTop20
# TEST Top twenty 404 URLs (4c)
Test.assertEquals(badEndpointsTop20, [(u'/pub/winvn/readme.txt', 633), (u'/pub/winvn/release.txt', 494), (u'/shuttle/missions/STS-69/mission-STS-69.html', 431), (u'/images/nasa-logo.gif', 319), (u'/elv/DELTA/uncons.htm', 178), (u'/shuttle/missions/sts-68/ksc-upclose.gif', 156), (u'/history/apollo/sa-1/sa-1-patch-small.gif', 146), (u'/images/crawlerway-logo.gif', 120), (u'/://spacelink.msfc.nasa.gov', 117), (u'/history/apollo/pad-abort-test-1/pad-abort-test-1-patch-small.gif', 100), (u'/history/apollo/a-001/a-001-patch-small.gif', 97), (u'/images/Nasa-logo.gif', 85), (u'/shuttle/resources/orbiters/atlantis.gif', 64), (u'/history/apollo/images/little-joe.jpg', 62), (u'/images/lf-logo.gif', 59), (u'/shuttle/resources/orbiters/discovery.gif', 56), (u'/shuttle/resources/orbiters/challenger.gif', 54), (u'/robots.txt', 53), (u'/elv/new01.gif>', 43), (u'/history/apollo/pad-abort-test-2/pad-abort-test-2-patch-small.gif', 38)], 'incorrect badEndpointsTop20')
# TODO: Replace <FILL IN> with appropriate code
errHostsCountPairTuple = badRecords.map(lambda log: (log.host, 1))
errHostsSum = errHostsCountPairTuple.reduceByKey(lambda a, b: a + b)
errHostsTop25 = errHostsSum.takeOrdered(25, lambda a: -a[1])
print 'Top 25 hosts that generated errors: %s' % errHostsTop25
# TEST Top twenty-five 404 response code hosts (4d)
Test.assertEquals(len(errHostsTop25), 25, 'length of errHostsTop25 is not 25')
Test.assertEquals(len(set(errHostsTop25) - set([(u'maz3.maz.net', 39), (u'piweba3y.prodigy.com', 39), (u'gate.barr.com', 38), (u'm38-370-9.mit.edu', 37), (u'ts8-1.westwood.ts.ucla.edu', 37), (u'nexus.mlckew.edu.au', 37), (u'204.62.245.32', 33), (u'163.206.104.34', 27), (u'spica.sci.isas.ac.jp', 27), (u'www-d4.proxy.aol.com', 26), (u'www-c4.proxy.aol.com', 25), (u'203.13.168.24', 25), (u'203.13.168.17', 25), (u'internet-gw.watson.ibm.com', 24), (u'scooter.pa-x.dec.com', 23), (u'crl5.crl.com', 23), (u'piweba5y.prodigy.com', 23), (u'onramp2-9.onr.com', 22), (u'slip145-189.ut.nl.ibm.net', 22), (u'198.40.25.102.sap2.artic.edu', 21), (u'gn2.getnet.com', 20), (u'msp1-16.nas.mr.net', 20), (u'isou24.vilspa.esa.es', 19), (u'dial055.mbnet.mb.ca', 19), (u'tigger.nashscene.com', 19)])), 0, 'incorrect errHostsTop25')
# TODO: Replace <FILL IN> with appropriate code
errDateCountPairTuple = badRecords.map(lambda log: (log.date_time.day, 1))
errDateSum = errDateCountPairTuple.reduceByKey(lambda a, b: a + b)
errDateSorted = (errDateSum
.sortByKey()
.cache())
errByDate = errDateSorted.collect()
print '404 Errors by day: %s' % errByDate
# TEST 404 response codes per day (4e)
Test.assertEquals(errByDate, [(1, 243), (3, 303), (4, 346), (5, 234), (6, 372), (7, 532), (8, 381), (9, 279), (10, 314), (11, 263), (12, 195), (13, 216), (14, 287), (15, 326), (16, 258), (17, 269), (18, 255), (19, 207), (20, 312), (21, 305), (22, 288)], 'incorrect errByDate')
Test.assertTrue(errDateSorted.is_cached, 'incorrect errDateSorted.is_cached')
# TODO: Replace <FILL IN> with appropriate code
daysWithErrors404 = errDateSorted.map(lambda (day, total): day).collect()
errors404ByDay = errDateSorted.map(lambda (day, total): total).collect()
# TEST Visualizing the 404 Response Codes by Day (4f)
Test.assertEquals(daysWithErrors404, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect daysWithErrors404')
Test.assertEquals(errors404ByDay, [243, 303, 346, 234, 372, 532, 381, 279, 314, 263, 195, 216, 287, 326, 258, 269, 255, 207, 312, 305, 288], 'incorrect errors404ByDay')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithErrors404), 0, max(errors404ByDay)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('404 Errors')
plt.plot(daysWithErrors404, errors404ByDay)
pass
# TODO: Replace <FILL IN> with appropriate code
topErrDate = errDateSorted.takeOrdered(5, lambda a: -a[1])
print 'Top Five dates for 404 requests: %s' % topErrDate
# TEST Five dates for 404 requests (4g)
Test.assertEquals(topErrDate, [(7, 532), (8, 381), (6, 372), (4, 346), (15, 326)], 'incorrect topErrDate')
# TODO: Replace <FILL IN> with appropriate code
hourCountPairTuple = badRecords.map(lambda log: (log.date_time.hour, 1))
hourRecordsSum = hourCountPairTuple.reduceByKey(lambda a, b: a + b)
hourRecordsSorted = (hourRecordsSum
.sortByKey()
.cache())
errHourList = hourRecordsSorted.collect()
print 'Top hours for 404 requests: %s' % errHourList
# TEST Hourly 404 response codes (4h)
Test.assertEquals(errHourList, [(0, 175), (1, 171), (2, 422), (3, 272), (4, 102), (5, 95), (6, 93), (7, 122), (8, 199), (9, 185), (10, 329), (11, 263), (12, 438), (13, 397), (14, 318), (15, 347), (16, 373), (17, 330), (18, 268), (19, 269), (20, 270), (21, 241), (22, 234), (23, 272)], 'incorrect errHourList')
Test.assertTrue(hourRecordsSorted.is_cached, 'incorrect hourRecordsSorted.is_cached')
# TODO: Replace <FILL IN> with appropriate code
hoursWithErrors404 = hourRecordsSorted.map(lambda (hr, total): hr).collect()
errors404ByHours = hourRecordsSorted.map(lambda (hr, total): total).collect()
# TEST Visualizing the 404 Response Codes by Hour (4i)
Test.assertEquals(hoursWithErrors404, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], 'incorrect hoursWithErrors404')
Test.assertEquals(errors404ByHours, [175, 171, 422, 272, 102, 95, 93, 122, 199, 185, 329, 263, 438, 397, 318, 347, 373, 330, 268, 269, 270, 241, 234, 272], 'incorrect errors404ByHours')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(hoursWithErrors404), 0, max(errors404ByHours)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Hour')
plt.ylabel('404 Errors')
plt.plot(hoursWithErrors404, errors404ByHours)
pass
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot training and test accuracy
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import sklearn.metrics
import tensorflow as tf

# If GPU is not available:
# GPU_USE = '/cpu:0'
# config = tf.ConfigProto(device_count = {"GPU": 0})
# If GPU is available:
config = tf.ConfigProto()
config.log_device_placement = True
config.allow_soft_placement = True
config.gpu_options.allocator_type = 'BFC'
# Limit the maximum memory used
config.gpu_options.per_process_gpu_memory_fraction = 0.1
# set session config
tf.keras.backend.set_session(tf.Session(config=config))
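# Note: tf.Session / set_session is the TF 1.x-style way to apply this config;
# it has no effect under TF 2.x eager execution.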
########## HYPER PARAMETERS
batch_size = 128
epochs = 10
optimizer = tf.keras.optimizers.RMSprop()
########## HYPER PARAMETERS
########## MODEL ARCHITECTURE
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(5, activation='relu', input_shape=(784,)))
model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))
########## MODEL ARCHITECTURE
# Print summary
model.summary()
# compile model for training
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history = model.fit(x_train, y_train_one_hot,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test_one_hot))
# use model for inference to get test accuracy
y_test_pred = model.predict(x_test)
y_test_pred = np.argmax(y_test_pred, axis=1)
print ('\n Summary of the precision, recall, F1 score for each class:')
print (sklearn.metrics.classification_report(y_test, y_test_pred))
print ('\n Confusion matrix: ')
print (sklearn.metrics.confusion_matrix(y_test, y_test_pred))
import matplotlib.pyplot as plt
plt.plot(history.history['val_acc'], label="Test Accuracy")
plt.plot(history.history['acc'], label="Training Accuracy")
plt.legend()
# save model
model.save("myModel.h5")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: Explore time series data
Step4: The function clean_data below does three things
Step7: Read data and preprocessing
Step11: Make train-eval-test split
Step12: Modeling
Step14: To monitor training progress and compare evaluation metrics for different models, we'll use the function below to plot metrics captured from the training job such as training and validation loss or accuracy.
Step15: Baseline
Step16: Ok. So just naively guessing the most common outcome UP will give about 29.5% accuracy on the validation set.
Step17: The accuracy seems to level out pretty quickly. To report the accuracy, we'll average the accuracy on the validation set across the last few epochs of training.
Step18: Deep Neural Network
Step19: Convolutional Neural Network
Step20: Recurrent Neural Network
Step21: Multi-layer RNN
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
!pip install --user google-cloud-bigquery==1.25.0
PROJECT = "your-gcp-project-here" # REPLACE WITH YOUR PROJECT NAME
BUCKET = "your-gcp-bucket-here" # REPLACE WITH YOUR BUCKET
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
import os
os.environ["PROJECT"] = PROJECT
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from google.cloud import bigquery
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Dense, DenseFeatures,
Conv1D, MaxPool1D,
Reshape, RNN,
LSTM, GRU, Bidirectional)
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
# To plot pretty figures
%matplotlib inline
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# For reproducible results.
from numpy.random import seed
seed(1)
tf.random.set_seed(2)
%%time
bq = bigquery.Client(project=PROJECT)
bq_query = '''
#standardSQL
SELECT
symbol,
Date,
direction,
close_values_prior_260
FROM
`stock_market.eps_percent_change_sp500`
LIMIT
100
'''
# assumed: materialize the query result so it can be explored (likely elided from this cell)
df_raw = bq.query(bq_query).to_dataframe()
def clean_data(input_df):
"""
Cleans data to prepare for training.
Args:
    input_df: Pandas dataframe.
Returns:
    Pandas dataframe.
"""
df = input_df.copy()
# Remove inf/na values.
real_valued_rows = ~(df == np.inf).max(axis=1)
df = df[real_valued_rows].dropna()
# TF doesn't accept datetimes in DataFrame.
df['Date'] = pd.to_datetime(df['Date'], errors='coerce')
df['Date'] = df['Date'].dt.strftime('%Y-%m-%d')
# TF requires numeric label.
df['direction_numeric'] = df['direction'].apply(lambda x: {'DOWN': 0,
'STAY': 1,
'UP': 2}[x])
return df
STOCK_HISTORY_COLUMN = 'close_values_prior_260'
COL_NAMES = ['day_' + str(day) for day in range(0, 260)]
LABEL = 'direction_numeric'
def _scale_features(df):
"""
z-scale feature columns of Pandas dataframe.
Args:
    df: Pandas dataframe.
Returns:
    Pandas dataframe with each column standardized according to the
    values in that column.
"""
avg = df.mean()
std = df.std()
return (df - avg) / std
def create_features(df, label_name):
"""
Create modeling features and label from Pandas dataframe.
Args:
    df: Pandas dataframe.
    label_name: str, the column name of the label.
Returns:
    Pandas dataframe
"""
# Expand 1 column containing a list of close prices to 260 columns.
time_series_features = df[STOCK_HISTORY_COLUMN].apply(pd.Series)
# Rename columns.
time_series_features.columns = COL_NAMES
time_series_features = _scale_features(time_series_features)
# Concat time series features with static features and label.
label_column = df[LABEL]
return pd.concat([time_series_features,
label_column], axis=1)
def _create_split(phase):
"""
Create string to produce train/valid/test splits for a SQL query.
Args:
    phase: str, either TRAIN, VALID, or TEST.
Returns:
    String.
"""
floor, ceiling = '2002-11-01', '2010-07-01'
if phase == 'VALID':
floor, ceiling = '2010-07-01', '2011-09-01'
elif phase == 'TEST':
floor, ceiling = '2011-09-01', '2012-11-30'
return '''
WHERE Date >= '{0}'
AND Date < '{1}'
'''.format(floor, ceiling)
def create_query(phase):
"""
Create SQL query to create train/valid/test splits.
Args:
    phase: str, either TRAIN, VALID, or TEST.
Returns:
    String.
"""
basequery = """
#standardSQL
SELECT
    symbol,
    Date,
    direction,
    close_values_prior_260
FROM
    `stock_market.eps_percent_change_sp500`
"""
return basequery + _create_split(phase)
N_TIME_STEPS = 260
N_LABELS = 3
Xtrain = pd.read_csv('stock-train.csv')
Xvalid = pd.read_csv('stock-valid.csv')
ytrain = Xtrain.pop(LABEL)
yvalid = Xvalid.pop(LABEL)
ytrain_categorical = to_categorical(ytrain.values)
yvalid_categorical = to_categorical(yvalid.values)
def plot_curves(train_data, val_data, label='Accuracy'):
"""
Plot training and validation metrics on single axis.
Args:
    train_data: list, metrics obtained from training data.
    val_data: list, metrics obtained from validation data.
    label: str, title and label for plot.
Returns:
    Matplotlib plot.
"""
plt.plot(np.arange(len(train_data)) + 0.5,
train_data,
"b.-", label="Training " + label)
plt.plot(np.arange(len(val_data)) + 1,
val_data, "r.-",
label="Validation " + label)
plt.gca().xaxis.set_major_locator(mpl.ticker.MaxNLocator(integer=True))
plt.legend(fontsize=14)
plt.xlabel("Epochs")
plt.ylabel(label)
plt.grid(True)
sum(yvalid == ytrain.value_counts().idxmax()) / yvalid.shape[0]
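# Baseline: always predict the most common training class; this expression is
# the fraction of validation labels that happen to match it.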
model = Sequential()
model.add(Dense(units=N_LABELS,
activation='softmax',
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
model.compile(optimizer=Adam(learning_rate=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=30,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
np.mean(history.history['val_accuracy'][-5:])
dnn_hidden_units = [16, 8]
model = Sequential()
for layer in dnn_hidden_units:
model.add(Dense(units=layer,
activation="relu"))
model.add(Dense(units=N_LABELS,
activation="softmax",
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
model.compile(optimizer=Adam(learning_rate=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=10,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
np.mean(history.history['val_accuracy'][-5:])
model = Sequential()
# Convolutional layer
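# Conv1D expects input shaped [batch, time steps, channels], so the flat
# 260-value price window is reshaped into a single-channel sequence first.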
model.add(Reshape(target_shape=[N_TIME_STEPS, 1]))
model.add(Conv1D(filters=5,
kernel_size=5,
strides=2,
padding="valid",
input_shape=[None, 1]))
model.add(MaxPool1D(pool_size=2,
strides=None,
padding='valid'))
# Flatten the result and pass through DNN.
model.add(tf.keras.layers.Flatten())
model.add(Dense(units=N_TIME_STEPS//4,
activation="relu"))
model.add(Dense(units=N_LABELS,
activation="softmax",
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
model.compile(optimizer=Adam(learning_rate=0.01),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=10,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
np.mean(history.history['val_accuracy'][-5:])
model = Sequential()
# Reshape inputs to pass through RNN layer.
model.add(Reshape(target_shape=[N_TIME_STEPS, 1]))
model.add(LSTM(N_TIME_STEPS // 8,
activation='relu',
return_sequences=False))
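# return_sequences=False keeps only the LSTM's final hidden state, which is
# all the downstream softmax classifier needs.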
model.add(Dense(units=N_LABELS,
activation='softmax',
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
# Create the model.
model.compile(optimizer=Adam(learning_rate=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=40,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
np.mean(history.history['val_accuracy'][-5:])
rnn_hidden_units = [N_TIME_STEPS // 16,
N_TIME_STEPS // 32]
model = Sequential()
# Reshape inputs to pass through RNN layer.
model.add(Reshape(target_shape=[N_TIME_STEPS, 1]))
for layer in rnn_hidden_units[:-1]:
model.add(GRU(units=layer,
activation='relu',
return_sequences=True))
model.add(GRU(units=rnn_hidden_units[-1],
return_sequences=False))
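# Intermediate GRUs return full sequences so the layers can be stacked;
# only the last GRU collapses the sequence to a single state vector.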
model.add(Dense(units=N_LABELS,
activation="softmax",
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
model.compile(optimizer=Adam(learning_rate=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=50,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
np.mean(history.history['val_accuracy'][-5:])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create simulated data that can be used in this proof of concept
Step2: Now, based on the results above, we can use the fitted relation to predict the Y-data from the X-data into the future
Step3: Or, more rigorously, predict by sampling the posterior
|
<ASSISTANT_TASK:>
Python Code:
!date
# Standard Library Python Modules
# Common Python Modules
import matplotlib.pyplot as plt
import numpy as np
import spacepy.plot as spp
import spacepy.toolbox as tb
import pandas as pd
import pymc # this is the MCMC tool
# put plots into this document
%matplotlib inline
# observed data
from scipy.signal import savgol_filter
# make a time dependent x
t_x = np.random.uniform(0, 1, 500)
t_x = savgol_filter(t_x, 95, 2)[95//2:-95//2]
t_x -= t_x.min()
t_x /= (t_x.max() - t_x.min())
plt.plot(t_x)
plt.xlabel('time')
plt.ylabel('SWx data value')
a = 6
b = 2
sigma = 2.0
y_obs = a*t_x + b + np.random.normal(0, sigma, len(t_x))
data = pd.DataFrame(np.array([t_x, y_obs]).T, columns=['x', 'y'])
x = t_x
data.plot(x='x', y='y', kind='scatter', s=50)
plt.xlabel('SWx inst')
plt.ylabel('Mission Inst')
# define priors
a = pymc.Normal('slope', mu=0, tau=1.0/10**2)
b = pymc.Normal('intercept', mu=0, tau=1.0/10**2)
tau = pymc.Gamma("tau", alpha=0.1, beta=0.1)
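# In PyMC2, 'tau' is the precision (1/sigma**2); the Gamma prior keeps it
# positive and weakly informative.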
# define likelihood
@pymc.deterministic
def mu(a=a, b=b, x=x):
return a*x + b
y = pymc.Normal('y', mu=mu, tau=tau, value=y_obs, observed=True)
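# y is the observed node: a Gaussian likelihood centred on the deterministic
# regression line mu, with precision tau.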
# inference
m = pymc.Model([a, b, tau, x, y])
mc = pymc.MCMC(m)
# run 6 chains
for i in range(6):
mc.sample(iter=90000, burn=10000)
# plot up the data and overplot the possible fit lines
data.plot(x='x', y='y', kind='scatter', s=50)
xx = np.linspace(data.x.min(), data.x.max(), 10)
for ii in range(0, len(mc.trace('slope', chain=None)[:]),
len(mc.trace('slope', chain=None)[:])//400):
yy = (xx*mc.trace('slope', chain=None)[:][ii] +
mc.trace('intercept', chain=None)[:][ii])
plt.plot(xx,yy, c='r')
pymc.Matplot.plot(mc)
pymc.Matplot.summary_plot(mc)
int_vals = mc.stats()['intercept']['95% HPD interval']
slope_vals = mc.stats()['slope']['95% HPD interval']
print(int_vals, slope_vals)
y_inst = np.tile(t_x, (2,1)).T * slope_vals + int_vals
plt.plot(y_inst, c='r')
plt.xlabel('time')
plt.ylabel('Mission inst value')
plt.title('Major upper limit of spread')
pred = []
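# Pointwise credible band: for each x, push the full posterior of slope and
# intercept through the line and keep the central 95% interval.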
for v in t_x:
pred.append(np.percentile(v * mc.trace('slope',
chain=None)[:] +
mc.trace('intercept',
chain=None)[:],
[2.5, 97.5]))
plt.plot(pred, c='r')
plt.xlabel('time')
plt.ylabel('Predicted Mission inst value')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'fio-ronm', 'sandbox-1', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
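# Illustrative only: boolean-valued properties take a bare Python bool, e.g.
# DOC.set_value(True)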
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Bisecting K-Means
Step2: Cutting the tree structure
|
<ASSISTANT_TASK:>
Python Code:
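# Assumptions for a self-contained run (not in the original snippet): `df` is a
# pandas DataFrame whose first two columns hold 2-D point coordinates, and
# `plot_scatter` is a small matplotlib helper along these lines:
import matplotlib.pyplot as plt
import pyclust

def plot_scatter(X, labels=None, title=None):
    # Scatter the 2-D points, colouring by cluster label when labels are given
    plt.scatter(X[:, 0], X[:, 1], c=labels, s=20)
    if title is not None:
        plt.title(title)
    plt.show()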
km = pyclust.KMeans(n_clusters=5)
km.fit(df.iloc[:,0:2].values)
print(km.centers_)
plot_scatter(df.iloc[:,0:2].values, labels=km.labels_, title="Scatter Plot: K-Means")
bkm = pyclust.BisectKMeans(n_clusters=5)
bkm.fit(df.iloc[:,0:2].values)
print(bkm.labels_)
plot_scatter(df.iloc[:,0:2].values, labels=bkm.labels_, title="Scatter Plot: Bisecting K-Means")
bkm.tree_.show(line_type='ascii')
plot_scatter(df.iloc[:,0:2].values, labels=bkm.cut(2)[0], title="Scatter Plot: Bisecting K-Means (2)")
plot_scatter(df.iloc[:,0:2].values, labels=bkm.cut(3)[0], title="Scatter Plot: Bisecting K-Means (3)")
plot_scatter(df.iloc[:,0:2].values, labels=bkm.cut(4)[0], title="Scatter Plot: Bisecting K-Means (4)")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We're going to download recent crimes from the City of Chicago's data portal.
Step2: It looks like the column named "Date" with an ID of "154418879" is the one we want.
Step3: The response data contains column IDs rather than column names. Let's build a lookup table to convert them later, and a helper function to "fix" our rows that we get from the API.
Step5: Let's build a query for the Socrata API.
Step6: Now let's request the data from the API, using our query
|
<ASSISTANT_TASK:>
Python Code:
import json
import requests
CRIME_SOCRATA_VIEW_ID = 'ijzp-q8t2'
def get_data_portal_url(view_id):
return 'http://data.cityofchicago.org/api/views/{view_id}'.format(
view_id=view_id)
def get_dataset_columns(view_id):
'''
Get dataset field names from the Socrata API
Returns:
A dictionary that acts as a lookup table from column ID to column name
'''
url = get_data_portal_url(view_id)
meta_response = requests.get(url)
if not meta_response.ok:
meta_response.raise_for_status()
meta = meta_response.json()
return {c['id']: c['name'] for c in meta['columns']}
columns = get_dataset_columns(CRIME_SOCRATA_VIEW_ID)
for column_id, name in columns.items():
print("{}: {}".format(column_id, name))
date_column_id, date_column_name = next((i, n) for i, n in columns.items() if n.lower() == "date")
print("Date column ID: {}".format(date_column_id))
def slugify(s, replacement='_'):
return s.replace(' ', replacement).lower()
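# e.g. slugify('Primary Type') -> 'primary_type'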
def get_clean_column_lookup(column_lookup):
return {str(i): slugify(n) for i, n in column_lookup.items()}
human_columns = get_clean_column_lookup(columns)
import pprint
pprint.pprint(human_columns)
def humanize_columns(row, column_lookup):
humanized = {}
for column_id, value in row.items():
try:
humanized[column_lookup[column_id]] = value
except KeyError:
humanized[column_id] = value
return humanized
from datetime import date, timedelta
def build_query(since_date, date_column_id, view_id):
'''
Get a Socrata API query for all records updated after the last update
Args:
since_date (datetime.date): date object. All crimes since this date will be retrieved.
date_column_id (str): String containing the column ID for the dates we'll filter on
view_id (str): Socrata view ID for this dataset
Returns:
Dictionary that can be serialized into a JSON string used as the POST
body to the Socrata API
'''
query = {
'originalViewId': view_id,
'name': 'inline filter',
'query' : {
'filterCondition': {
'type': 'operator',
'value': 'AND',
'children' : [{
'type' : 'operator',
'value' : 'GREATER_THAN',
'children': [{
'columnId' : date_column_id,
'type' : 'column',
}, {
'type' : 'literal',
'value' : since_date.strftime('%Y-%m-%d'),
}],
}],
},
}
}
return query
# Months have different lengths. Let's just find the date 30 days ago
today = date.today()
date_30_days_ago = today - timedelta(days=30)
query = build_query(date_30_days_ago, date_column_id, CRIME_SOCRATA_VIEW_ID)
import pprint
print("The query looks like this: ")
pprint.pprint(query)
import json
import requests
def get_rows_url(start, count):
url_tpl = "https://data.cityofchicago.org/api/views/INLINE/rows.json?method=getRows&start={start}&length={length}"
return url_tpl.format(
start=start,
length=count
)
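# e.g. get_rows_url(0, 1000) ends in 'rows.json?method=getRows&start=0&length=1000'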
def get_rows(query, start=0, count=1000):
url = get_rows_url(start, count)
headers = { 'content-type' : 'application/json' }
response = requests.post(url, data=json.dumps(query), headers=headers, verify=False)
return response.json()
def transform_row(row, transforms):
transformed_row = row
for transform in transforms:
transformed_row = transform(transformed_row)
return transformed_row
def get_all_rows(query, transforms=[]):
continue_fetching = True
page_size = 1000
start = 0
while continue_fetching:
rows = get_rows(query, start, page_size)
if len(rows) < page_size:
continue_fetching = False
start += page_size
for row in rows:
yield(transform_row(row, transforms))
crimes = list(get_all_rows(query, transforms=[lambda r: humanize_columns(r, human_columns)]))
import pprint
print("There are {} crimes since {}".format(len(crimes), date_30_days_ago.strftime("%Y-%m-%d")))
print("The first one looks like: ")
pprint.pprint(crimes[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We load a file containing the relevant GMSK transmission. The recording was done at the Dwingeloo radiotelescope and can be obtained here. Remember to edit the path below to point to the correct file.
Step2: The 500bps GMSK signal is converted down to baseband and lowpass filtered to 1600Hz.
Step3: Perform arctangent FSK demodulation.
Step4: Correct for phase wrapping.
Step5: We extract the soft bits by guessing the correct clock phase and decimating. No pulse shaping matched filtering has been done, and tracking of the clock frequency is not necessary either. The separation between the bits 1 and 0 is good enough for demodulation without bit errors.
Step6: Soft bits are now converted to hard bits.
Step7: Now we undo GMSK precoding, since we want to export the OQPSK bits. This part is tricky and use the knowledge of the ASM to correct for phase ambiguities.
Step8: However we need to consider another branch in which I and Q are swapped, so the sign of one of them is inverted (consider swapping I and Q versus multiplying by 1j).
Step9: We correlate both branches against the uncoded ASM. Note the correlation can happen on either branch and have either sign.
Step10: We take note of all correlation positions, the branch where they happen and their sign to correct the phase and output packets that can be fed into the GNU Radio decoder. Note that we're always dealing with hard bits for simplicity.
Step11: Now we plot the phase of the signal in the problematic zone. The change in slope shows the frequency jump.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
fs = 40000 # sample rate
skip = 3200 * fs * 8 # the first transmission seems to have missing samples from the recorder
skip = 5550 * fs * 8
length = 140 * fs
with open('/home/daniel/Descargas/DSLWP-B_PI9CAM_2019-02-27T07_26_01_436.4MHz_40ksps_complex.raw', 'rb') as f:  # binary mode for np.fromfile
f.seek(skip)
x = np.fromfile(f, dtype='complex64', count = length)
f = 400
x = x * np.exp(-1j*2*np.pi*np.arange(x.size)*f/fs).astype('complex64')
h = scipy.signal.firwin(1000, 0.02).astype('float32')
x = scipy.signal.lfilter(h, 1, x).astype('complex64')
s = np.diff(np.angle(x).astype('float32'))
s[s > np.pi] -= 2*np.pi
s[s < -np.pi] += 2*np.pi
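# Essentially equivalent alternative (not in the original): s = np.diff(np.unwrap(np.angle(x)))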
plt.plot(s)
phase = 40
softbits = s[np.int32(np.arange(phase,s.size, 80))]
softbits = softbits - 5e-8 * np.arange(softbits.size) - 1e-3 # correction for frequency offset and drift
plt.plot(softbits,'.')
plt.axhline(y = 0, color='green')
plt.ylim([-0.05,0.05]);
bits = (softbits > 0)*1
decbits = np.cumsum(np.int32(2*bits-1))%4
decbits[::2] = (decbits[::2] == 1)*1
decbits[1::2] = (decbits[1::2] == 0)*1
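# The cumulative sum of +/-1 tracks the accumulated MSK phase in quarter turns;
# the parity-dependent comparisons map those phase states back to OQPSK bits.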
decbits_inv = decbits.copy()
decbits_inv[::2] ^= 1
asm = np.unpackbits(np.array([0x03,0x47,0x76,0xC7,0x27,0x28,0x95,0xB0, 0xFC, 0xB8, 0x89, 0x38, 0xD8, 0xD7, 0x6A, 0x4F], dtype='uint8'))
asm_straight_corr = scipy.signal.correlate(2*decbits-1, 2*asm.astype('float')-1)
asm_inv_corr = scipy.signal.correlate(2*decbits_inv-1, 2*asm.astype('float')-1)
plt.figure()
plt.plot(asm_straight_corr)
plt.figure()
plt.plot(asm_inv_corr);
corr_positions = np.sort(np.concatenate((np.where(np.abs(asm_straight_corr) > 100)[0], np.where(np.abs(asm_inv_corr) > 100)[0])))
with open('/tmp/dslwp_bits', 'wb') as file:  # binary mode for ndarray.tofile
for st in corr_positions:
if np.abs(asm_straight_corr[st]) > 100:
b = np.sign(asm_straight_corr[st])*(2*decbits[st+1:st+1+7152]-1)
else:
b = np.sign(asm_inv_corr[st])*(2*decbits_inv[st+1:st+1+7152]-1)
b.astype('float32').tofile(file)
phase = np.unwrap(np.angle(x[1000000:3000000]).astype('float32'))
plt.plot(scipy.signal.detrend(phase))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this example, we are going to use the built-in fulladder from Mantle.
Step2: A common name for a full adder is a carry-sum adder, or csa.
Step3: To construct the 8-bit popcount, we first use 3 csa's to sum
Step4: Test bench
|
<ASSISTANT_TASK:>
Python Code:
import magma as m
from mantle import FullAdder
# 2 input
def csa2(I0, I1):
return m.bits(FullAdder()(I0, I1, 0))
# 3 input
def csa3(I0, I1, I2):
return m.bits(FullAdder()(I0, I1, I2))
def popcount8(I):
# Dadda dot notation (of the result)
# o o csa0_0_21 - row 0, bits 2 and 1
# o o csa0_1_21 - row 1, bits 2 and 1
# o o csa0_2_21 - row 2, bits 2 and 1
csa0_0_21 = csa3(I[0], I[1], I[2])
csa0_1_21 = csa3(I[3], I[4], I[5])
csa0_2_21 = csa2(I[6], I[7])
# o o csa1_0_21 - row 0, bits 2 and 1
# o o csa1_1_42 - row 1, bits 4 and 2
csa1_0_21 = csa3(csa0_0_21[0], csa0_1_21[0], csa0_2_21[0])
csa1_1_42 = csa3(csa0_0_21[1], csa0_1_21[1], csa0_2_21[1])
# o o csa2_0_42 - row 0, bits 4 and 2
csa2_0_42 = csa2(csa1_0_21[1], csa1_1_42[0])
# o o csa3_0_84 - row 0, bits 8 and 4
csa3_0_84 = csa2(csa1_1_42[1], csa2_0_42[1])
return m.bits([csa1_0_21[0], csa2_0_42[0], csa3_0_84[0], csa3_0_84[1]])
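# Sanity reference (not part of the original): the expected outputs match
# Python's own bit counting, e.g. bin(0xEE).count('1') == 6.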
import fault
class Main(m.Circuit):
io = m.IO(I=m.In(m.Bits[8]), O=m.Out(m.Bits[4]))
io.O @= popcount8(io.I)
tester = fault.PythonTester(Main)
assert tester(0xFF) == 8
assert tester(0xF0) == 4
assert tester(0xEE) == 6
m.compile('build/popcount8', Main, inline=True)
!cat build/popcount8.v
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create and launch AWS instances.
Step2: Digits dataset (scikit-learn's 8x8 handwritten digits)
Step3: Train a NN to predict the numbers (as simple as it gets)
Step4: Train with some given parameters...
Step5: Alright, how about something else...
Step6: and now something different than that..
Step7: Issue
Step8: This will take a while, even if using all cores on a local machine; let's distribute the workload
Step9: Connect to the resulting scheduler
Step14: Define functions which will be distributed to workers...
Step15: Run test functions...
Step16: Distribute the actual work
Step17: Also create a distributed queue system...
Step18: queue is currently empty...
Step19: Start submitting jobs to the queue with a thread
Step20: and begin receiving the results...
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# Hide info messages from paramiko
import logging
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.WARN)
import time
import random
import threading
import pandas as pd
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (2, 2)
from distributed import progress, Client
from pprint import pprint
from sklearn.decomposition import PCA
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import cross_val_score
from sklearn.datasets import load_digits
from cclyde.cluster import Cluster
cluster = Cluster(key_name='default_windows', n_nodes=16, cluster_name='default', instance_type='t2.medium')
cluster.configure()
cluster.launch_instances_nonblocking()
X, y = load_digits(return_X_y=True)
X = np.asarray([x.flatten() for x in X])
for i in range(3):
plt.imshow(X[i].reshape((8, 8)), cmap='Greys_r')
plt.title('Digit: {}'.format(y[i]))
plt.show()
pca = PCA(n_components=30)
print 'Features before: ', X.shape[1]
X = pca.fit_transform(X)
print 'Features after: ', X.shape[1]
print '{}% Explained Variance'.format(round(sum(pca.explained_variance_ratio_) * 100, 1))
lr = MLPClassifier(hidden_layer_sizes=(10, 5), batch_size=10,
solver='sgd', learning_rate_init=0.01, early_stopping=True)
start = time.time()
scores = cross_val_score(estimator=lr,
X=X,
y=y,
cv=5)
print("\nAccuracy: {}% (+/- {})".format(round(scores.mean() * 100, 2), round(scores.std(), 3) * 2))
print('Finished in {}sec\n'.format(round(time.time() - start, 2)))
lr = MLPClassifier(hidden_layer_sizes=(10, 10,), batch_size=100,
solver='sgd', learning_rate_init=0.01, early_stopping=True)
start = time.time()
scores = cross_val_score(estimator=lr,
X=X,
y=y,
cv=5)
print("\nAccuracy: {}% (+/- {})".format(round(scores.mean() * 100, 2), round(scores.std(), 3) * 2))
print('Finished in {}sec\n'.format(round(time.time() - start, 2)))
lr = MLPClassifier(hidden_layer_sizes=(10, 10, 10,), batch_size=100,
solver='sgd', learning_rate_init=0.01, early_stopping=True)
start = time.time()
scores = cross_val_score(estimator=lr,
X=X,
y=y,
cv=5)
print("\nAccuracy: {}% (+/- {})".format(round(scores.mean() * 100, 2), round(scores.std(), 3) * 2))
print('Finished in {}sec\n'.format(round(time.time() - start, 2)))
# Define hyper parameter ranges
batch_sizes = np.linspace(start=5, stop=750, num=50, dtype=np.int64)
n_layers = range(1, 8, 1)
# Make a list of all combinations
params = []
for batch_size in batch_sizes:
for n_layer in n_layers:
n_neuron = np.random.randint(low=5, high=200)
params.append({'batch_size': batch_size,
'hidden_layer_sizes': tuple(n_neuron for _ in range(n_layer)),
'solver': 'sgd',
'learning_rate_init': 0.01,
'early_stopping': True
})
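# 50 batch sizes x 7 layer counts = 350 parameter dictionaries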
print '{} different combinations.'.format(len(params))
pprint(params[:2])
print 'Launching thread is alive: ', cluster.instance_launching_thread.is_alive()
cluster.install_anaconda()
cluster.install_python_packages(['scikit-learn', 'numpy', 'pandas', 'dask', 'futures'], method='conda')
scheduler_address = cluster.launch_dask()
c = Client(address=scheduler_address)
c
def get_data(kwargs):
'''
Function which gets data and performs PCA on it.
'''
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
import numpy as np
X, y = load_digits(return_X_y=True)
X = np.asarray([x.flatten() for x in X])
pca = PCA(n_components=30)
X = pca.fit_transform(X)
return (kwargs, X, y)
def model_tester(package):
'''
Function which is mapped to cluster. Passes kwargs to model to be trained.
Returns score based on those kwargs.
'''
kwargs, X, y = package
import time
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import cross_val_score
# Initialize model with given kwargs
lr = MLPClassifier(**kwargs)
scores = cross_val_score(estimator=lr,
X=X,
y=y,
cv=5)
return (kwargs, scores.mean(), scores.std())
def score_combiner(package):
'''
Not needed, but more functions == more pretty colors
'''
import time
import random
time.sleep(random.random())
kwargs, score_m, score_std = package
kwargs.update({'score': score_m, 'std': score_std})
return kwargs
def double(n):
'''
Useless worker function # 1
'''
import time
import random
import sklearn
time.sleep(random.random())
return n * 2, 2
def add_two(package):
'''
Useless worker function # 2
'''
n, n2 = package
import time
import random
time.sleep(random.random())
return n + n2
futures = c.map(double, range(250))
futures = c.map(add_two, futures)
progress(futures)
futures = c.map(get_data, params)
futures = c.map(model_tester, futures)
futures = c.map(score_combiner, futures)
progress(futures)
results = c.gather(futures)
df = pd.DataFrame(results)
df['n_layers'] = df.hidden_layer_sizes.map(lambda _tuple: len(_tuple))
df['n_neurons'] = df.hidden_layer_sizes.map(lambda _tuple: _tuple[0])
df.head()
df.n_layers.unique()
data = []
for n_layers in df.n_layers.unique():
temp = df[df.n_layers == n_layers]
trace = go.Scatter(
x = temp.n_neurons,
y = temp.n_layers,
mode='markers',
text=['{}%<br>Layers: {}'.format(round(v * 100, 2), l)
for v, l in zip(temp.score.values, temp.n_layers.values)],
name='{} layers'.format(n_layers),
marker=dict(
size=temp.batch_size / 20.0,
color = temp.score, #set color equal to a variable
colorscale='Viridis',
showscale=False
)
)
data.append(trace)
layout = dict(title = 'Best performing models.<br>(size = batch size)',
xaxis = dict(zeroline = False, title='Neuron Count'),
yaxis = dict(zeroline = False, title='Layer Count'),
)
fig = dict(data=data, layout=layout)
py.iplot(fig, filename='styled-scatter')
df.loc[df.score.idxmax(), :]  # .ix is deprecated; idxmax returns the label of the best-scoring row
from Queue import Queue
local_q = Queue()
remote_q = c.scatter(local_q)
def long_calc1(n):
import time
import random
time.sleep(random.random())
return n + 2
def long_calc2(n):
import time
import random
time.sleep(random.random())
return n * 2
def long_calc3(n):
import time
import random
time.sleep(random.random())
return n - 2
long_calc1_q = c.map(long_calc1, remote_q)
long_calc2_q = c.map(long_calc2, long_calc1_q)
long_calc3_q = c.map(long_calc3, long_calc2_q)
result_q = c.gather(long_calc3_q)
result_q.qsize()
def start_jobs():
jobs = range(500)
for job in jobs:
time.sleep(random.random())
local_q.put(job)
return
thread = threading.Thread(target=start_jobs)
thread.start()
def get_jobs():
while True:
print result_q.get()
return
finish_thread = threading.Thread(target=get_jobs)
finish_thread.start()
cluster.terminate_cluster()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: scikit-learn HP Tuning on AI Platform
Step2: The data
Step3: Load the hyperparameter values that are passed to the model during training.
Step4: Add code to download the data from GCS
Step5: Use the Hyperparameters
Step6: Report the mean accuracy as hyperparameter tuning objective metric.
Step7: Export and save the model to GCS
Step8: Part 2
Step9: Next, we need to set the hp tuning values used to train our model. Check HyperparameterSpec for more info.
Step10: Lastly, we need to install the dependencies used in our model. Check adding_standard_pypi_dependencies for more info.
Step11: Part 3
Step12: Submit the training job.
Step13: [Optional] StackDriver Logging
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
%env PROJECT_ID PROJECT_ID
%env BUCKET_ID BUCKET_ID
%env JOB_DIR gs://BUCKET_ID/scikit_learn_job_dir
%env REGION us-central1
%env TRAINER_PACKAGE_PATH ./auto_mpg_hp_tuning
%env MAIN_TRAINER_MODULE auto_mpg_hp_tuning.train
%env RUNTIME_VERSION 1.9
%env PYTHON_VERSION 3.5
%env HPTUNING_CONFIG hptuning_config.yaml
! mkdir auto_mpg_hp_tuning
%%writefile ./auto_mpg_hp_tuning/train.py
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import os
import pandas as pd
import subprocess
from google.cloud import storage
import hypertune
from sklearn.externals import joblib
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split
%%writefile -a ./auto_mpg_hp_tuning/train.py
parser = argparse.ArgumentParser()
parser.add_argument(
'--job-dir', # handled automatically by AI Platform
help='GCS location to write checkpoints and export models',
required=True
)
parser.add_argument(
'--alpha', # Specified in the config file
help='Constant that multiplies the L1 term.',
default=1.0,
type=float
)
parser.add_argument(
'--max_iter', # Specified in the config file
help='The maximum number of iterations.',
default=1000,
type=int
)
parser.add_argument(
'--tol', # Specified in the config file
help='The tolerance for the optimization: if the updates are smaller than tol, '
'the optimization code checks the dual gap for optimality and continues '
'until it is smaller than tol.',
default=0.0001,
type=float
)
parser.add_argument(
'--selection', # Specified in the config file
help='Supported criteria are “cyclic” loop over features sequentially and '
'“random” a random coefficient is updated every iteration ',
default='cyclic'
)
args = parser.parse_args()
%%writefile -a ./auto_mpg_hp_tuning/train.py
# Public bucket holding the auto mpg data
bucket = storage.Client().bucket('cloud-samples-data')
# Path to the data inside the public bucket
blob = bucket.blob('ml-engine/auto_mpg/auto-mpg.data')
# Download the data
blob.download_to_filename('auto-mpg.data')
# ---------------------------------------
# This is where your model code would go. Below is an example model using the auto mpg dataset.
# ---------------------------------------
# Define the format of your input data including unused columns
# (These are the columns from the auto-mpg data files)
COLUMNS = (
'mpg',
'cylinders',
'displacement',
'horsepower',
'weight',
'acceleration',
'model-year',
'origin',
'car-name'
)
# Load the training auto mpg dataset
with open('./auto-mpg.data', 'r') as train_data:
raw_training_data = pd.read_csv(train_data, header=None, names=COLUMNS, delim_whitespace=True)
# Remove the column we are trying to predict ('mpg') from our features list
# Convert the Dataframe to a lists of lists
features = raw_training_data.drop('mpg', axis=1).drop('car-name', axis=1).values.tolist()
# Create our training labels list, convert the Dataframe to a lists of lists
labels = raw_training_data['mpg'].values.tolist()
train_features, test_features, train_labels, test_labels = train_test_split(features, labels, test_size=0.15)
%%writefile -a ./auto_mpg_hp_tuning/train.py
# Create the regressor, here we will use a Lasso Regressor to demonstrate the use of HP Tuning.
# Here is where we set the variables used during HP Tuning from
# the parameters passed into the python script
regressor = Lasso(
alpha=args.alpha,
max_iter=args.max_iter,
tol=args.tol,
selection=args.selection)
# Transform the features and fit them to the regressor
regressor.fit(train_features, train_labels)
%%writefile -a ./auto_mpg_hp_tuning/train.py
# Calculate the mean accuracy on the given test data and labels.
score = regressor.score(test_features, test_labels)
# The default name of the metric is training/hptuning/metric.
# We recommend that you assign a custom name. The only functional difference is that
# if you use a custom name, you must set the hyperparameterMetricTag value in the
# HyperparameterSpec object in your job request to match your chosen name.
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#HyperparameterSpec
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='my_metric_tag',
metric_value=score,
global_step=1000)
%%writefile -a ./auto_mpg_hp_tuning/train.py
# Export the model to a file
model_filename = 'model.joblib'
joblib.dump(regressor, model_filename)
# Example: job_dir = 'gs://BUCKET_ID/scikit_learn_job_dir/1'
job_dir = args.job_dir.replace('gs://', '') # Remove the 'gs://'
# Get the Bucket Id
bucket_id = job_dir.split('/')[0]
# Get the path
# str.lstrip() strips a character set, not a prefix, so slice the prefix off instead
bucket_path = job_dir[len(bucket_id) + 1:] # Example: 'scikit_learn_job_dir/1'
# Upload the model to GCS
bucket = storage.Client().bucket(bucket_id)
blob = bucket.blob('{}/{}'.format(
bucket_path,
model_filename))
blob.upload_from_filename(model_filename)
%%writefile ./auto_mpg_hp_tuning/__init__.py
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note that __init__.py can be an empty file.
%%writefile ./hptuning_config.yaml
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# hyperparam.yaml
trainingInput:
hyperparameters:
goal: MAXIMIZE
maxTrials: 30
maxParallelTrials: 5
hyperparameterMetricTag: my_metric_tag
enableTrialEarlyStopping: TRUE
params:
- parameterName: alpha
type: DOUBLE
minValue: 0.0
maxValue: 10.0
scaleType: UNIT_LINEAR_SCALE
- parameterName: max_iter
type: INTEGER
minValue: 1000
maxValue: 5000
scaleType: UNIT_LINEAR_SCALE
- parameterName: tol
type: DOUBLE
minValue: 0.0001
maxValue: 0.1
scaleType: UNIT_LINEAR_SCALE
- parameterName: selection
type: CATEGORICAL
categoricalValues: [
"cyclic",
"random"
]
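# Up to 30 trials in total, at most 5 running concurrently; weak trials are
# pruned early because enableTrialEarlyStopping is TRUE.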
%%writefile ./setup.py
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['cloudml-hypertune']
setup(
name='auto_mpg_hp_tuning',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='Auto MPG sklearn HP tuning training application'
)
! gcloud config set project $PROJECT_ID
! gcloud ml-engine jobs submit training auto_mpg_hp_tuning_$(date +"%Y%m%d_%H%M%S") \
--job-dir $JOB_DIR \
--package-path $TRAINER_PACKAGE_PATH \
--module-name $MAIN_TRAINER_MODULE \
--region $REGION \
--runtime-version=$RUNTIME_VERSION \
--python-version=$PYTHON_VERSION \
--scale-tier BASIC \
--config $HPTUNING_CONFIG
! gsutil ls $JOB_DIR/*
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Cleaning Up Yes/No Columns
Step2: Cleaning Up Checkbox Columns
Step3: Cleaning Up Ranking Columns
Step4: Find the Highest Ranked Movie
Step5: Remember, here a lower ranking is better! So as suspected, overall the original movies are rated higher than the newer ones and Empire Strikes Back is rated best of all with Return of the Jedi coming in 2nd.
Step6: It appears that the original movies were seen more than the newer ones.
Step7: It looks like women liked the newer movies more than men, at least the first one or two.
|
<ASSISTANT_TASK:>
Python Code:
# Import the necessary stuff
import pandas as pd
# Read the dataset into a Pandas DataFrame
star_wars = pd.read_csv("../data/star_wars.csv", encoding="ISO-8859-1")
# Explore the data by looking at the first 10 rows.
star_wars.head(10)
# NOTE: Lots of NaN, even for stuff like RespondentID. Needs cleaning.
# Look at the column names
star_wars.columns
# Remove any rows where RespondentID is NaN.
star_wars = star_wars[pd.notnull(star_wars["RespondentID"])]
star_wars.head()
yes_no = {'Yes': True, 'No': False}
col1 = 'Have you seen any of the 6 films in the Star Wars franchise?'
col2 = 'Do you consider yourself to be a fan of the Star Wars film franchise?'
# Convert both these columns to boolean
star_wars[col1] = star_wars[col1].map(yes_no)
star_wars[col2] = star_wars[col2].map(yes_no)
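# Values missing from the mapping (e.g. NaN) come back as NaN from Series.map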
# At the end, both columns should only have True, False, or NaN values
star_wars.head()
# For each column above, convert the column to only contain True and False
import numpy as np
movie_mapping = {
"Star Wars: Episode I The Phantom Menace": True,
np.nan: False,
"Star Wars: Episode II Attack of the Clones": True,
"Star Wars: Episode III Revenge of the Sith": True,
"Star Wars: Episode IV A New Hope": True,
"Star Wars: Episode V The Empire Strikes Back": True,
"Star Wars: Episode VI Return of the Jedi": True
}
for col in star_wars.columns[3:9]:
star_wars[col] = star_wars[col].map(movie_mapping)
star_wars.head()
# Rename each of the columns above to have more clear names
star_wars = star_wars.rename(columns={
"Which of the following Star Wars films have you seen? Please select all that apply.": "seen_1",
"Unnamed: 4": "seen_2",
"Unnamed: 5": "seen_3",
"Unnamed: 6": "seen_4",
"Unnamed: 7": "seen_5",
"Unnamed: 8": "seen_6"
})
star_wars.head()
# Convert each column above to a float type
star_wars[star_wars.columns[9:15]] = star_wars[star_wars.columns[9:15]].astype(float)
star_wars.head()
# Rename each of the above columns to a more descriptive name.
star_wars = star_wars.rename(columns={
"Please rank the Star Wars films in order of preference with 1 being your favorite film in the franchise and 6 being your least favorite film.": "ranking_1",
"Unnamed: 10": "ranking_2",
"Unnamed: 11": "ranking_3",
"Unnamed: 12": "ranking_4",
"Unnamed: 13": "ranking_5",
"Unnamed: 14": "ranking_6"
})
star_wars.head()
# Use the mean method to compute the mean of each of the ranking columns
star_wars[star_wars.columns[9:15]].mean()
# Make a bar char of each ranking
%matplotlib inline
import matplotlib.pyplot as plt
plt.bar(range(6), star_wars[star_wars.columns[9:15]].mean())
# Use the sum method to compute the sum of each of the seen columns
star_wars[star_wars.columns[3:9]].sum()
# Plot the view counts
plt.bar(range(6), star_wars[star_wars.columns[3:9]].sum())
# Split the data into 2 groups based on one of the above columns
males = star_wars[star_wars["Gender"] == "Male"]
females = star_wars[star_wars["Gender"] == "Female"]
# Plot rankings separately for men and women
plt.bar(range(6), males[males.columns[9:15]].mean())
plt.show()
plt.bar(range(6), females[females.columns[9:15]].mean())
plt.show()
# Plot view counts by gender
plt.bar(range(6), males[males.columns[3:9]].sum())
plt.show()
plt.bar(range(6), females[females.columns[3:9]].sum())
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting the data ready for work
Step2: Testing variogram function gamv
|
<ASSISTANT_TASK:>
Python Code:
#general imports
import pygslib
#get the data in gslib format into a pandas Dataframe
mydata= pygslib.gslib.read_gslib_file('../data/cluster.dat')
# This is a 2D file, in this GSLIB version we require 3D data and drillhole name or domain code
# so, we are adding constant elevation = 0 and a dummy BHID = 1
mydata['Zlocation']=0.
mydata['bhid']=1.
# printing to verify results
print (' \n **** 5 first rows in my datafile \n\n ', mydata.head(n=5))
# these are the parameters we need. Note that at difference of GSLIB this dictionary also stores
# the actual data (ex, X, Y, etc.).
#important! python is case sensitive 'bhid' is not equal to 'BHID'
parameters = {
'x' : mydata['Xlocation'].values,
'y' : mydata['Ylocation'].values,
'z' : mydata['Zlocation'].values,
'bhid' : mydata['bhid'].values,
'vr' : mydata['Primary'].values,
'tmin' : -1.0e21,
'tmax' : 1.0e21,
'nlag' : 10,
'xlag' : 1,
'ndir' : 10,
'ndip' : 10,
'orgdir': 0.,
'orgdip': 0.,
'isill' : 1,
'sills' : [mydata['Primary'].var()],
'ivtail' : [1],
'ivhead' : [1],
'ivtype' : [1]
}
#Now we are ready to calculate the veriogram
npairs, dis, gam, hm, tm, hv, tv = pygslib.gslib.gamv3D(parameters)  # 'npairs' avoids shadowing numpy's np, imported below
npairs
# create structured grid with data
import vtk
import vtk.util.numpy_support as vtknumpy
import math
import numpy as np
XYZPts = vtk.vtkPoints()
XYZPts.SetNumberOfPoints(parameters['ndip']*parameters['nlag']*parameters['ndir']*2)
angdir = (math.pi/180.)*180./(parameters['ndir'])
angdip = (math.pi/180.)*90./(parameters['ndip'])
orgdir = parameters['orgdir'] * math.pi/180.
orgdip = parameters['orgdip'] * math.pi/180.
id=-1
for k in range(-parameters['ndip']+1,parameters['ndip']+1):
for j in range(parameters['nlag']):
for i in range(parameters['ndir']):
id+=1
x= parameters['xlag']*(j+1)*math.cos(angdir*i-orgdir)*math.cos(angdip*k-orgdip)
y= parameters['xlag']*(j+1)*math.sin(angdir*i-orgdir)*math.cos(angdip*k-orgdip)
z= parameters['xlag']*(j+1)* math.sin(angdip*k-orgdip)
print (id, i,j,k, angdir*i*(180/math.pi), angdip*k*(180/math.pi),x,y,z)
#print math.cos(angdip*k-orgdip)
XYZPts.SetPoint(id,x,y,z)
XYZGrid = vtk.vtkStructuredGrid()
# the k-axis size must match the 2*ndip dip angles generated in the loop above
XYZGrid.SetDimensions(parameters['ndir'],parameters['nlag'],parameters['ndip']*2)
XYZGrid.SetPoints(XYZPts)
ptid = np.arange(2*parameters['ndip']*parameters['nlag']*(parameters['ndir']))
cscalars = vtknumpy.numpy_to_vtk(ptid)
cscalars.SetName('PointID')
XYZGrid.GetPointData().AddArray(cscalars)
#Write file
writer = vtk.vtkXMLStructuredGridWriter()
writer.SetFileName("output.vts")
writer.SetInputData(XYZGrid)
writer.Write()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Building a tree, regression
Step2: Visualizing the Tree, The Moons
Step3: Algorithms for Setting up Decision Trees
Step4: Computing the Gini Factor
Step5: Entropy and the ID3 algorithm
Step6: Cancer Data again now with Decision Trees and other Methods
Step7: Another example, the moons again
Step8: Playing around with regions
Step9: Regression trees
Step10: Final regressor code
Step11: Pros and cons of trees, pros
Step12: Using the Voting Classifier
Step13: Please, not the moons again! Voting and Bagging
Step14: Bagging Examples
Step15: Making your own Bootstrap
Step16: Random forests
Step17: Compare Bagging on Trees with Random Forests
Step18: Boosting, a Bird's Eye View
Step19: AdaBoost for Regression
Step20: Gradient Boosting, Classification Example
Step21: XGBoost
Step22: Xgboost on the Cancer Data
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
steps=250
distance=0
x=0
distance_list=[]
steps_list=[]
while x<steps:
distance+=np.random.randint(-1,2)
distance_list.append(distance)
x+=1
steps_list.append(x)
plt.plot(steps_list,distance_list, color='green', label="Random Walk Data")
steps_list=np.asarray(steps_list)
distance_list=np.asarray(distance_list)
X=steps_list[:,np.newaxis]
#Polynomial fits
#Degree 2
poly_features=PolynomialFeatures(degree=2, include_bias=False)
X_poly=poly_features.fit_transform(X)
lin_reg=LinearRegression()
poly_fit=lin_reg.fit(X_poly,distance_list)
b=lin_reg.coef_
c=lin_reg.intercept_
print ("2nd degree coefficients:")
print ("zero power: ",c)
print ("first power: ", b[0])
print ("second power: ",b[1])
z = np.arange(0, steps, .01)
z_mod=b[1]*z**2+b[0]*z+c
fit_mod=b[1]*X**2+b[0]*X+c
plt.plot(z, z_mod, color='r', label="2nd Degree Fit")
plt.title("Polynomial Regression")
plt.xlabel("Steps")
plt.ylabel("Distance")
#Degree 10
poly_features10=PolynomialFeatures(degree=10, include_bias=False)
X_poly10=poly_features10.fit_transform(X)
poly_fit10=lin_reg.fit(X_poly10,distance_list)
y_plot=poly_fit10.predict(X_poly10)
plt.plot(X, y_plot, color='black', label="10th Degree Fit")
plt.legend()
plt.show()
#Decision Tree Regression
from sklearn.tree import DecisionTreeRegressor
regr_1=DecisionTreeRegressor(max_depth=2)
regr_2=DecisionTreeRegressor(max_depth=5)
regr_3=DecisionTreeRegressor(max_depth=11)
regr_1.fit(X, distance_list)
regr_2.fit(X, distance_list)
regr_3.fit(X, distance_list)
X_test = np.arange(0.0, steps, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3=regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, distance_list, s=2.5, c="black", label="data")
plt.plot(X_test, y_1, color="red",
label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="green", label="max_depth=5", linewidth=2)
plt.plot(X_test, y_3, color="m", label="max_depth=11", linewidth=2)
plt.xlabel("Data")
plt.ylabel("Target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
import os
from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.tree import export_graphviz
from IPython.display import Image
from pydot import graph_from_dot_data
import pandas as pd
import numpy as np
cancer = load_breast_cancer()
X = pd.DataFrame(cancer.data, columns=cancer.feature_names)
print(X)
y = pd.Categorical.from_codes(cancer.target, cancer.target_names)
y = pd.get_dummies(y)
print(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
tree_clf = DecisionTreeClassifier(max_depth=5)
tree_clf.fit(X_train, y_train)
export_graphviz(
tree_clf,
out_file="DataFiles/cancer.dot",
feature_names=cancer.feature_names,
class_names=cancer.target_names,
rounded=True,
filled=True
)
cmd = 'dot -Tpng DataFiles/cancer.dot -o DataFiles/cancer.png'
os.system(cmd)
# Common imports
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_moons
from sklearn.tree import export_graphviz
from pydot import graph_from_dot_data
import pandas as pd
import os
np.random.seed(42)
X, y = make_moons(n_samples=100, noise=0.25, random_state=53)
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=0)
tree_clf = DecisionTreeClassifier(max_depth=5)
tree_clf.fit(X_train, y_train)
export_graphviz(
tree_clf,
out_file="DataFiles/moons.dot",
rounded=True,
filled=True
)
cmd = 'dot -Tpng DataFiles/moons.dot -o DataFiles/moons.png'
os.system(cmd)
# Common imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.tree import export_graphviz
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from IPython.display import Image
from pydot import graph_from_dot_data
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
infile = open(data_path("rideclass.csv"),'r')
# Read the experimental data with Pandas
from IPython.display import display
ridedata = pd.read_csv(infile,names = ('Outlook','Temperature','Humidity','Wind','Ride'))
ridedata = pd.DataFrame(ridedata)
# Features and targets
X = ridedata.loc[:, ridedata.columns != 'Ride'].values
y = ridedata.loc[:, ridedata.columns == 'Ride'].values
# Create the encoder.
encoder = OneHotEncoder(handle_unknown="ignore")
# Assume for simplicity all features are categorical.
encoder.fit(X)
# Apply the encoder.
X = encoder.transform(X)
print(X)
# Then do a Classification tree
tree_clf = DecisionTreeClassifier(max_depth=2)
tree_clf.fit(X, y)
print("Train set accuracy with Decision Tree: {:.2f}".format(tree_clf.score(X,y)))
#transfer to a decision tree graph
export_graphviz(
tree_clf,
out_file="DataFiles/ride.dot",
rounded=True,
filled=True
)
cmd = 'dot -Tpng DataFiles/ride.dot -o DataFiles/ride.png'
os.system(cmd)
# Split a dataset based on an attribute and an attribute value
def test_split(index, value, dataset):
left, right = list(), list()
for row in dataset:
if row[index] < value:
left.append(row)
else:
right.append(row)
return left, right
# Calculate the Gini index for a split dataset
def gini_index(groups, classes):
# count all samples at split point
n_instances = float(sum([len(group) for group in groups]))
# sum weighted Gini index for each group
gini = 0.0
for group in groups:
size = float(len(group))
# avoid divide by zero
if size == 0:
continue
score = 0.0
# score the group based on the score for each class
for class_val in classes:
p = [row[-1] for row in group].count(class_val) / size
score += p * p
# weight the group score by its relative size
gini += (1.0 - score) * (size / n_instances)
return gini
# Select the best split point for a dataset
def get_split(dataset):
class_values = list(set(row[-1] for row in dataset))
b_index, b_value, b_score, b_groups = 999, 999, 999, None
for index in range(len(dataset[0])-1):
for row in dataset:
groups = test_split(index, row[index], dataset)
gini = gini_index(groups, class_values)
print('X%d < %.3f Gini=%.3f' % ((index+1), row[index], gini))
if gini < b_score:
b_index, b_value, b_score, b_groups = index, row[index], gini, groups
return {'index':b_index, 'value':b_value, 'groups':b_groups}
dataset = [[0,0,0,0,0],
[0,0,0,1,1],
[1,0,0,0,1],
[2,1,0,0,1],
[2,2,1,0,1],
[2,2,1,1,0],
[1,2,1,1,1],
[0,1,0,0,0],
[0,2,1,0,1],
[2,1,1,0,1],
[0,1,1,1,1],
[1,1,0,1,1],
[1,0,1,0,1],
[2,1,0,1,0]]
split = get_split(dataset)
print('Split: [X%d < %.3f]' % ((split['index']+1), split['value']))
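# Quick sanity check (my addition): with the helpers above, two pure groups
# give a Gini of 0.0, while two perfectly mixed two-class groups give 0.5.
print(gini_index([[[1, 0], [1, 0]], [[1, 1], [1, 1]]], [0, 1]))  # 0.0
print(gini_index([[[1, 0], [1, 1]], [[1, 0], [1, 1]]], [0, 1]))  # 0.5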
import re
import math
from collections import deque
# x is examples in training set
# y is set of targets
# label is target attributes
# Node is a class which has properties values, childs, and next
# root is top node in the decision tree
class Node(object):
def __init__(self):
self.value = None
self.next = None
self.childs = None
# Simple class of Decision Tree
# Aimed for who want to learn Decision Tree, so it is not optimized
class DecisionTree(object):
def __init__(self, sample, attributes, labels):
self.sample = sample
self.attributes = attributes
self.labels = labels
self.labelCodes = None
self.labelCodesCount = None
self.initLabelCodes()
# print(self.labelCodes)
self.root = None
self.entropy = self.getEntropy([x for x in range(len(self.labels))])
def initLabelCodes(self):
self.labelCodes = []
self.labelCodesCount = []
for l in self.labels:
if l not in self.labelCodes:
self.labelCodes.append(l)
self.labelCodesCount.append(0)
self.labelCodesCount[self.labelCodes.index(l)] += 1
def getLabelCodeId(self, sampleId):
return self.labelCodes.index(self.labels[sampleId])
def getAttributeValues(self, sampleIds, attributeId):
vals = []
for sid in sampleIds:
val = self.sample[sid][attributeId]
if val not in vals:
vals.append(val)
# print(vals)
return vals
def getEntropy(self, sampleIds):
entropy = 0
labelCount = [0] * len(self.labelCodes)
for sid in sampleIds:
labelCount[self.getLabelCodeId(sid)] += 1
# print("-ge", labelCount)
for lv in labelCount:
# print(lv)
if lv != 0:
entropy += -lv/len(sampleIds) * math.log(lv/len(sampleIds), 2)
else:
entropy += 0
return entropy
def getDominantLabel(self, sampleIds):
labelCodesCount = [0] * len(self.labelCodes)
for sid in sampleIds:
labelCodesCount[self.labelCodes.index(self.labels[sid])] += 1
return self.labelCodes[labelCodesCount.index(max(labelCodesCount))]
def getInformationGain(self, sampleIds, attributeId):
gain = self.getEntropy(sampleIds)
attributeVals = []
attributeValsCount = []
attributeValsIds = []
for sid in sampleIds:
val = self.sample[sid][attributeId]
if val not in attributeVals:
attributeVals.append(val)
attributeValsCount.append(0)
attributeValsIds.append([])
vid = attributeVals.index(val)
attributeValsCount[vid] += 1
attributeValsIds[vid].append(sid)
# print("-gig", self.attributes[attributeId])
for vc, vids in zip(attributeValsCount, attributeValsIds):
# print("-gig", vids)
gain -= vc/len(sampleIds) * self.getEntropy(vids)
return gain
def getAttributeMaxInformationGain(self, sampleIds, attributeIds):
attributesEntropy = [0] * len(attributeIds)
for i, attId in zip(range(len(attributeIds)), attributeIds):
attributesEntropy[i] = self.getInformationGain(sampleIds, attId)
maxId = attributeIds[attributesEntropy.index(max(attributesEntropy))]
return self.attributes[maxId], maxId
def isSingleLabeled(self, sampleIds):
label = self.labels[sampleIds[0]]
for sid in sampleIds:
if self.labels[sid] != label:
return False
return True
def getLabel(self, sampleId):
return self.labels[sampleId]
def id3(self):
sampleIds = [x for x in range(len(self.sample))]
attributeIds = [x for x in range(len(self.attributes))]
self.root = self.id3Recv(sampleIds, attributeIds, self.root)
def id3Recv(self, sampleIds, attributeIds, root):
root = Node() # Initialize current root
if self.isSingleLabeled(sampleIds):
root.value = self.labels[sampleIds[0]]
return root
# print(attributeIds)
if len(attributeIds) == 0:
root.value = self.getDominantLabel(sampleIds)
return root
bestAttrName, bestAttrId = self.getAttributeMaxInformationGain(
sampleIds, attributeIds)
# print(bestAttrName)
root.value = bestAttrName
root.childs = [] # Create list of children
for value in self.getAttributeValues(sampleIds, bestAttrId):
# print(value)
child = Node()
child.value = value
root.childs.append(child) # Append new child node to current
# root
childSampleIds = []
for sid in sampleIds:
if self.sample[sid][bestAttrId] == value:
childSampleIds.append(sid)
if len(childSampleIds) == 0:
child.next = self.getDominantLabel(sampleIds)
else:
# print(bestAttrName, bestAttrId)
# print(attributeIds)
if len(attributeIds) > 0 and bestAttrId in attributeIds:
toRemove = attributeIds.index(bestAttrId)
attributeIds.pop(toRemove)
child.next = self.id3Recv(
childSampleIds, attributeIds, child.next)
return root
def printTree(self):
if self.root:
roots = deque()
roots.append(self.root)
while len(roots) > 0:
root = roots.popleft()
print(root.value)
if root.childs:
for child in root.childs:
print('({})'.format(child.value))
roots.append(child.next)
elif root.next:
print(root.next)
def test():
f = open('DataFiles/rideclass.csv')
attributes = f.readline().split(',')
attributes = attributes[1:len(attributes)-1]
print(attributes)
sample = f.readlines()
f.close()
for i in range(len(sample)):
        sample[i] = re.sub(r'\d+,', '', sample[i])
sample[i] = sample[i].strip().split(',')
labels = []
for s in sample:
labels.append(s.pop())
# print(sample)
# print(labels)
decisionTree = DecisionTree(sample, attributes, labels)
print("System entropy {}".format(decisionTree.entropy))
decisionTree.id3()
decisionTree.printTree()
if __name__ == '__main__':
test()
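# Standalone check (my addition): entropy for a hypothetical 9-vs-5 label
# split, the classic play-tennis numbers -- about 0.940 bits.
import math
p, q = 9 / 14, 5 / 14
print(-p * math.log(p, 2) - q * math.log(q, 2))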
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
# Load the data
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
print(X_train.shape)
print(X_test.shape)
# Logistic Regression
logreg = LogisticRegression(solver='lbfgs')
logreg.fit(X_train, y_train)
print("Test set accuracy with Logistic Regression: {:.2f}".format(logreg.score(X_test,y_test)))
# Support vector machine
svm = SVC(gamma='auto', C=100)
svm.fit(X_train, y_train)
print("Test set accuracy with SVM: {:.2f}".format(svm.score(X_test,y_test)))
# Decision Trees
deep_tree_clf = DecisionTreeClassifier(max_depth=None)
deep_tree_clf.fit(X_train, y_train)
print("Test set accuracy with Decision Trees: {:.2f}".format(deep_tree_clf.score(X_test,y_test)))
#now scale the data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Logistic Regression
logreg.fit(X_train_scaled, y_train)
print("Test set accuracy Logistic Regression with scaled data: {:.2f}".format(logreg.score(X_test_scaled,y_test)))
# Support Vector Machine
svm.fit(X_train_scaled, y_train)
print("Test set accuracy SVM with scaled data: {:.2f}".format(logreg.score(X_test_scaled,y_test)))
# Decision Trees
deep_tree_clf.fit(X_train_scaled, y_train)
print("Test set accuracy with Decision Trees and scaled data: {:.2f}".format(deep_tree_clf.score(X_test_scaled,y_test)))
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
from sklearn.svm import SVC
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_moons
from sklearn.tree import export_graphviz
Xm, ym = make_moons(n_samples=100, noise=0.25, random_state=53)
deep_tree_clf1 = DecisionTreeClassifier(random_state=42)
deep_tree_clf2 = DecisionTreeClassifier(min_samples_leaf=4, random_state=42)
deep_tree_clf1.fit(Xm, ym)
deep_tree_clf2.fit(Xm, ym)
def plot_decision_boundary(clf, X, y, axes=[0, 7.5, 0, 3], iris=True, legend=False, plot_training=True):
x1s = np.linspace(axes[0], axes[1], 100)
x2s = np.linspace(axes[2], axes[3], 100)
x1, x2 = np.meshgrid(x1s, x2s)
X_new = np.c_[x1.ravel(), x2.ravel()]
y_pred = clf.predict(X_new).reshape(x1.shape)
custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])
plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap)
if not iris:
custom_cmap2 = ListedColormap(['#7d7d58','#4c4c7f','#507d50'])
plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)
if plot_training:
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo", label="Iris-Setosa")
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs", label="Iris-Versicolor")
plt.plot(X[:, 0][y==2], X[:, 1][y==2], "g^", label="Iris-Virginica")
plt.axis(axes)
if iris:
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
else:
plt.xlabel(r"$x_1$", fontsize=18)
plt.ylabel(r"$x_2$", fontsize=18, rotation=0)
if legend:
plt.legend(loc="lower right", fontsize=14)
plt.figure(figsize=(11, 4))
plt.subplot(121)
plot_decision_boundary(deep_tree_clf1, Xm, ym, axes=[-1.5, 2.5, -1, 1.5], iris=False)
plt.title("No restrictions", fontsize=16)
plt.subplot(122)
plot_decision_boundary(deep_tree_clf2, Xm, ym, axes=[-1.5, 2.5, -1, 1.5], iris=False)
plt.title("min_samples_leaf = {}".format(deep_tree_clf2.min_samples_leaf), fontsize=14)
plt.show()
np.random.seed(6)
Xs = np.random.rand(100, 2) - 0.5
ys = (Xs[:, 0] > 0).astype(np.float32) * 2
angle = np.pi/4
rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
Xsr = Xs.dot(rotation_matrix)
tree_clf_s = DecisionTreeClassifier(random_state=42)
tree_clf_s.fit(Xs, ys)
tree_clf_sr = DecisionTreeClassifier(random_state=42)
tree_clf_sr.fit(Xsr, ys)
plt.figure(figsize=(11, 4))
plt.subplot(121)
plot_decision_boundary(tree_clf_s, Xs, ys, axes=[-0.7, 0.7, -0.7, 0.7], iris=False)
plt.subplot(122)
plot_decision_boundary(tree_clf_sr, Xsr, ys, axes=[-0.7, 0.7, -0.7, 0.7], iris=False)
plt.show()
# Quadratic training set + noise
np.random.seed(42)
m = 200
X = np.random.rand(m, 1)
y = 4 * (X - 0.5) ** 2
y = y + np.random.randn(m, 1) / 10
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor(max_depth=2, random_state=42)
tree_reg.fit(X, y)
from sklearn.tree import DecisionTreeRegressor
tree_reg1 = DecisionTreeRegressor(random_state=42, max_depth=2)
tree_reg2 = DecisionTreeRegressor(random_state=42, max_depth=3)
tree_reg1.fit(X, y)
tree_reg2.fit(X, y)
def plot_regression_predictions(tree_reg, X, y, axes=[0, 1, -0.2, 1], ylabel="$y$"):
x1 = np.linspace(axes[0], axes[1], 500).reshape(-1, 1)
y_pred = tree_reg.predict(x1)
plt.axis(axes)
plt.xlabel("$x_1$", fontsize=18)
if ylabel:
plt.ylabel(ylabel, fontsize=18, rotation=0)
plt.plot(X, y, "b.")
plt.plot(x1, y_pred, "r.-", linewidth=2, label=r"$\hat{y}$")
plt.figure(figsize=(11, 4))
plt.subplot(121)
plot_regression_predictions(tree_reg1, X, y)
for split, style in ((0.1973, "k-"), (0.0917, "k--"), (0.7718, "k--")):
plt.plot([split, split], [-0.2, 1], style, linewidth=2)
plt.text(0.21, 0.65, "Depth=0", fontsize=15)
plt.text(0.01, 0.2, "Depth=1", fontsize=13)
plt.text(0.65, 0.8, "Depth=1", fontsize=13)
plt.legend(loc="upper center", fontsize=18)
plt.title("max_depth=2", fontsize=14)
plt.subplot(122)
plot_regression_predictions(tree_reg2, X, y, ylabel=None)
for split, style in ((0.1973, "k-"), (0.0917, "k--"), (0.7718, "k--")):
plt.plot([split, split], [-0.2, 1], style, linewidth=2)
for split in (0.0458, 0.1298, 0.2873, 0.9040):
plt.plot([split, split], [-0.2, 1], "k:", linewidth=1)
plt.text(0.3, 0.5, "Depth=2", fontsize=13)
plt.title("max_depth=3", fontsize=14)
plt.show()
tree_reg1 = DecisionTreeRegressor(random_state=42)
tree_reg2 = DecisionTreeRegressor(random_state=42, min_samples_leaf=10)
tree_reg1.fit(X, y)
tree_reg2.fit(X, y)
x1 = np.linspace(0, 1, 500).reshape(-1, 1)
y_pred1 = tree_reg1.predict(x1)
y_pred2 = tree_reg2.predict(x1)
plt.figure(figsize=(11, 4))
plt.subplot(121)
plt.plot(X, y, "b.")
plt.plot(x1, y_pred1, "r.-", linewidth=2, label=r"$\hat{y}$")
plt.axis([0, 1, -0.2, 1.1])
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", fontsize=18, rotation=0)
plt.legend(loc="upper center", fontsize=18)
plt.title("No restrictions", fontsize=14)
plt.subplot(122)
plt.plot(X, y, "b.")
plt.plot(x1, y_pred2, "r.-", linewidth=2, label=r"$\hat{y}$")
plt.axis([0, 1, -0.2, 1.1])
plt.xlabel("$x_1$", fontsize=18)
plt.title("min_samples_leaf={}".format(tree_reg2.min_samples_leaf), fontsize=14)
plt.show()
heads_proba = 0.51
coin_tosses = (np.random.rand(10000, 10) < heads_proba).astype(np.int32)
cumulative_heads_ratio = np.cumsum(coin_tosses, axis=0) / np.arange(1, 10001).reshape(-1, 1)
plt.figure(figsize=(8,3.5))
plt.plot(cumulative_heads_ratio)
plt.plot([0, 10000], [0.51, 0.51], "k--", linewidth=2, label="51%")
plt.plot([0, 10000], [0.5, 0.5], "k-", label="50%")
plt.xlabel("Number of coin tosses")
plt.ylabel("Heads ratio")
plt.legend(loc="lower right")
plt.axis([0, 10000, 0.42, 0.58])
save_fig("votingsimple")
plt.show()
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=500, noise=0.30, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
log_clf = LogisticRegression(solver="liblinear", random_state=42)
rnd_clf = RandomForestClassifier(n_estimators=10, random_state=42)
svm_clf = SVC(gamma="auto", random_state=42)
voting_clf = VotingClassifier(
estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],
voting='hard')
voting_clf.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
for clf in (log_clf, rnd_clf, svm_clf, voting_clf):
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(clf.__class__.__name__, accuracy_score(y_test, y_pred))
log_clf = LogisticRegression(solver="liblinear", random_state=42)
rnd_clf = RandomForestClassifier(n_estimators=10, random_state=42)
svm_clf = SVC(gamma="auto", probability=True, random_state=42)
voting_clf = VotingClassifier(
estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],
voting='soft')
voting_clf.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
for clf in (log_clf, rnd_clf, svm_clf, voting_clf):
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(clf.__class__.__name__, accuracy_score(y_test, y_pred))
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=500, noise=0.30, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
log_clf = LogisticRegression(random_state=42)
rnd_clf = RandomForestClassifier(random_state=42)
svm_clf = SVC(random_state=42)
voting_clf = VotingClassifier(
estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],
voting='hard')
voting_clf.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
for clf in (log_clf, rnd_clf, svm_clf, voting_clf):
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(clf.__class__.__name__, accuracy_score(y_test, y_pred))
log_clf = LogisticRegression(random_state=42)
rnd_clf = RandomForestClassifier(random_state=42)
svm_clf = SVC(probability=True, random_state=42)
voting_clf = VotingClassifier(
estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],
voting='soft')
voting_clf.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
for clf in (log_clf, rnd_clf, svm_clf, voting_clf):
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(clf.__class__.__name__, accuracy_score(y_test, y_pred))
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
bag_clf = BaggingClassifier(
DecisionTreeClassifier(random_state=42), n_estimators=500,
max_samples=100, bootstrap=True, n_jobs=-1, random_state=42)
bag_clf.fit(X_train, y_train)
y_pred = bag_clf.predict(X_test)
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, y_pred))
tree_clf = DecisionTreeClassifier(random_state=42)
tree_clf.fit(X_train, y_train)
y_pred_tree = tree_clf.predict(X_test)
print(accuracy_score(y_test, y_pred_tree))
from matplotlib.colors import ListedColormap
def plot_decision_boundary(clf, X, y, axes=[-1.5, 2.5, -1, 1.5], alpha=0.5, contour=True):
x1s = np.linspace(axes[0], axes[1], 100)
x2s = np.linspace(axes[2], axes[3], 100)
x1, x2 = np.meshgrid(x1s, x2s)
X_new = np.c_[x1.ravel(), x2.ravel()]
y_pred = clf.predict(X_new).reshape(x1.shape)
custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])
plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap)
if contour:
custom_cmap2 = ListedColormap(['#7d7d58','#4c4c7f','#507d50'])
plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo", alpha=alpha)
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs", alpha=alpha)
plt.axis(axes)
plt.xlabel(r"$x_1$", fontsize=18)
plt.ylabel(r"$x_2$", fontsize=18, rotation=0)
plt.figure(figsize=(11,4))
plt.subplot(121)
plot_decision_boundary(tree_clf, X, y)
plt.title("Decision Tree", fontsize=14)
plt.subplot(122)
plot_decision_boundary(bag_clf, X, y)
plt.title("Decision Trees with Bagging", fontsize=14)
save_fig("baggingtree")
plt.show()
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.utils import resample
from sklearn.tree import DecisionTreeRegressor
n = 100
n_boostraps = 100
maxdepth = 8
# Make data set.
x = np.linspace(-3, 3, n).reshape(-1, 1)
y = np.exp(-x**2) + 1.5 * np.exp(-(x-2)**2)+ np.random.normal(0, 0.1, x.shape)
error = np.zeros(maxdepth)
bias = np.zeros(maxdepth)
variance = np.zeros(maxdepth)
polydegree = np.zeros(maxdepth)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# we produce a simple tree first as benchmark
simpletree = DecisionTreeRegressor(max_depth=3)
simpletree.fit(X_train_scaled, y_train)
simpleprediction = simpletree.predict(X_test_scaled)
for degree in range(1,maxdepth):
model = DecisionTreeRegressor(max_depth=degree)
y_pred = np.empty((y_test.shape[0], n_boostraps))
for i in range(n_boostraps):
x_, y_ = resample(X_train_scaled, y_train)
model.fit(x_, y_)
y_pred[:, i] = model.predict(X_test_scaled)#.ravel()
polydegree[degree] = degree
error[degree] = np.mean( np.mean((y_test - y_pred)**2, axis=1, keepdims=True) )
bias[degree] = np.mean( (y_test - np.mean(y_pred, axis=1, keepdims=True))**2 )
variance[degree] = np.mean( np.var(y_pred, axis=1, keepdims=True) )
print('Polynomial degree:', degree)
print('Error:', error[degree])
print('Bias^2:', bias[degree])
print('Var:', variance[degree])
print('{} >= {} + {} = {}'.format(error[degree], bias[degree], variance[degree], bias[degree]+variance[degree]))
mse_simpletree = np.mean((y_test - simpleprediction)**2)
plt.xlim(1,maxdepth)
plt.plot(polydegree, error, label='MSE for Bootstrap')
plt.plot(polydegree, mse_simpletree*np.ones(maxdepth), label='MSE simple tree')
plt.plot(polydegree, bias, label='bias')
plt.plot(polydegree, variance, label='Variance')
plt.legend()
save_fig("baggingboot")
plt.show()
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
# Load the data
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
print(X_train.shape)
print(X_test.shape)
# Logistic Regression
logreg = LogisticRegression(solver='lbfgs')
logreg.fit(X_train, y_train)
print("Test set accuracy with Logistic Regression: {:.2f}".format(logreg.score(X_test,y_test)))
# Support vector machine
svm = SVC(gamma='auto', C=100)
svm.fit(X_train, y_train)
print("Test set accuracy with SVM: {:.2f}".format(svm.score(X_test,y_test)))
# Decision Trees
deep_tree_clf = DecisionTreeClassifier(max_depth=None)
deep_tree_clf.fit(X_train, y_train)
print("Test set accuracy with Decision Trees: {:.2f}".format(deep_tree_clf.score(X_test,y_test)))
#now scale the data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Logistic Regression
logreg.fit(X_train_scaled, y_train)
print("Test set accuracy Logistic Regression with scaled data: {:.2f}".format(logreg.score(X_test_scaled,y_test)))
# Support Vector Machine
svm.fit(X_train_scaled, y_train)
print("Test set accuracy SVM with scaled data: {:.2f}".format(logreg.score(X_test_scaled,y_test)))
# Decision Trees
deep_tree_clf.fit(X_train_scaled, y_train)
print("Test set accuracy with Decision Trees and scaled data: {:.2f}".format(deep_tree_clf.score(X_test_scaled,y_test)))
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_validate
# Data set not specified
#Instantiate the model with 500 trees and entropy as splitting criteria
Random_Forest_model = RandomForestClassifier(n_estimators=500,criterion="entropy")
Random_Forest_model.fit(X_train_scaled, y_train)
#Cross validation
accuracy = cross_validate(Random_Forest_model,X_test_scaled,y_test,cv=10)['test_score']
print(accuracy)
print("Test set accuracy with Random Forests and scaled data: {:.2f}".format(Random_Forest_model.score(X_test_scaled,y_test)))
import scikitplot as skplt
y_pred = Random_Forest_model.predict(X_test_scaled)
skplt.metrics.plot_confusion_matrix(y_test, y_pred, normalize=True)
plt.show()
y_probas = Random_Forest_model.predict_proba(X_test_scaled)
skplt.metrics.plot_roc(y_test, y_probas)
plt.show()
skplt.metrics.plot_cumulative_gain(y_test, y_probas)
plt.show()
bag_clf = BaggingClassifier(
DecisionTreeClassifier(splitter="random", max_leaf_nodes=16, random_state=42),
n_estimators=500, max_samples=1.0, bootstrap=True, n_jobs=-1, random_state=42)
bag_clf.fit(X_train, y_train)
y_pred = bag_clf.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
rnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, n_jobs=-1, random_state=42)
rnd_clf.fit(X_train, y_train)
y_pred_rf = rnd_clf.predict(X_test)
np.sum(y_pred == y_pred_rf) / len(y_pred)
from sklearn.ensemble import AdaBoostClassifier
ada_clf = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=1), n_estimators=200,
algorithm="SAMME.R", learning_rate=0.5, random_state=42)
ada_clf.fit(X_train, y_train)
from sklearn.ensemble import AdaBoostClassifier
ada_clf = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=1), n_estimators=200,
algorithm="SAMME.R", learning_rate=0.5, random_state=42)
ada_clf.fit(X_train_scaled, y_train)
y_pred = ada_clf.predict(X_test_scaled)
skplt.metrics.plot_confusion_matrix(y_test, y_pred, normalize=True)
plt.show()
y_probas = ada_clf.predict_proba(X_test_scaled)
skplt.metrics.plot_roc(y_test, y_probas)
plt.show()
skplt.metrics.plot_cumulative_gain(y_test, y_probas)
plt.show()
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.preprocessing import StandardScaler
import scikitplot as skplt
from sklearn.metrics import mean_squared_error
n = 100
maxdegree = 6
# Make data set.
x = np.linspace(-3, 3, n).reshape(-1, 1)
y = np.exp(-x**2) + 1.5 * np.exp(-(x-2)**2)+ np.random.normal(0, 0.1, x.shape)
error = np.zeros(maxdegree)
bias = np.zeros(maxdegree)
variance = np.zeros(maxdegree)
polydegree = np.zeros(maxdegree)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
for degree in range(1,maxdegree):
model = GradientBoostingRegressor(max_depth=degree, n_estimators=100, learning_rate=1.0)
model.fit(X_train_scaled,y_train)
y_pred = model.predict(X_test_scaled)
polydegree[degree] = degree
error[degree] = np.mean( np.mean((y_test - y_pred)**2) )
bias[degree] = np.mean( (y_test - np.mean(y_pred))**2 )
variance[degree] = np.mean( np.var(y_pred) )
print('Max depth:', degree)
print('Error:', error[degree])
print('Bias^2:', bias[degree])
print('Var:', variance[degree])
print('{} >= {} + {} = {}'.format(error[degree], bias[degree], variance[degree], bias[degree]+variance[degree]))
plt.xlim(1,maxdegree-1)
plt.plot(polydegree, error, label='Error')
plt.plot(polydegree, bias, label='bias')
plt.plot(polydegree, variance, label='Variance')
plt.legend()
save_fig("gdregression")
plt.show()
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
import scikitplot as skplt
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import cross_validate
# Load the data
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
print(X_train.shape)
print(X_test.shape)
#now scale the data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
gd_clf = GradientBoostingClassifier(max_depth=3, n_estimators=100, learning_rate=1.0)
gd_clf.fit(X_train_scaled, y_train)
#Cross validation
accuracy = cross_validate(gd_clf,X_test_scaled,y_test,cv=10)['test_score']
print(accuracy)
print("Test set accuracy with Random Forests and scaled data: {:.2f}".format(gd_clf.score(X_test_scaled,y_test)))
import scikitplot as skplt
y_pred = gd_clf.predict(X_test_scaled)
skplt.metrics.plot_confusion_matrix(y_test, y_pred, normalize=True)
save_fig("gdclassiffierconfusion")
plt.show()
y_probas = gd_clf.predict_proba(X_test_scaled)
skplt.metrics.plot_roc(y_test, y_probas)
save_fig("gdclassiffierroc")
plt.show()
skplt.metrics.plot_cumulative_gain(y_test, y_probas)
save_fig("gdclassiffiercgain")
plt.show()
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
import xgboost as xgb
from sklearn.preprocessing import StandardScaler
import scikitplot as skplt
from sklearn.metrics import mean_squared_error
n = 100
maxdegree = 6
# Make data set.
x = np.linspace(-3, 3, n).reshape(-1, 1)
y = np.exp(-x**2) + 1.5 * np.exp(-(x-2)**2)+ np.random.normal(0, 0.1, x.shape)
error = np.zeros(maxdegree)
bias = np.zeros(maxdegree)
variance = np.zeros(maxdegree)
polydegree = np.zeros(maxdegree)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
for degree in range(1,maxdegree):
    model = xgb.XGBRegressor(objective ='reg:squarederror', colsample_bytree = 0.3, learning_rate = 0.1,max_depth = degree, alpha = 10, n_estimators = 200)
model.fit(X_train_scaled,y_train)
y_pred = model.predict(X_test_scaled)
polydegree[degree] = degree
error[degree] = np.mean( np.mean((y_test - y_pred)**2) )
bias[degree] = np.mean( (y_test - np.mean(y_pred))**2 )
variance[degree] = np.mean( np.var(y_pred) )
print('Max depth:', degree)
print('Error:', error[degree])
print('Bias^2:', bias[degree])
print('Var:', variance[degree])
print('{} >= {} + {} = {}'.format(error[degree], bias[degree], variance[degree], bias[degree]+variance[degree]))
plt.xlim(1,maxdegree-1)
plt.plot(polydegree, error, label='Error')
plt.plot(polydegree, bias, label='bias')
plt.plot(polydegree, variance, label='Variance')
plt.legend()
plt.show()
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_validate
import scikitplot as skplt
import xgboost as xgb
# Load the data
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
print(X_train.shape)
print(X_test.shape)
#now scale the data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
xg_clf = xgb.XGBClassifier()
xg_clf.fit(X_train_scaled,y_train)
y_pred = xg_clf.predict(X_test_scaled)
print("Test set accuracy with XGBoost and scaled data: {:.2f}".format(xg_clf.score(X_test_scaled,y_test)))
import scikitplot as skplt
y_pred = xg_clf.predict(X_test_scaled)
skplt.metrics.plot_confusion_matrix(y_test, y_pred, normalize=True)
save_fig("xdclassiffierconfusion")
plt.show()
y_probas = xg_clf.predict_proba(X_test_scaled)
skplt.metrics.plot_roc(y_test, y_probas)
save_fig("xdclassiffierroc")
plt.show()
skplt.metrics.plot_cumulative_gain(y_test, y_probas)
save_fig("gdclassiffiercgain")
plt.show()
xgb.plot_tree(xg_clf,num_trees=0)
plt.rcParams['figure.figsize'] = [50, 10]
save_fig("xgtree")
plt.show()
xgb.plot_importance(xg_clf)
plt.rcParams['figure.figsize'] = [5, 5]
save_fig("xgparams")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Display the PMF.
Step3: Define <tt>BiasPmf</tt>.
Step4: Make the biased Pmf of children in the household, as observed if you surveyed the children instead of the respondents.
Step5: Display the actual Pmf and the biased Pmf on the same axes.
Step6: Compute the means of the two Pmfs.
|
<ASSISTANT_TASK:>
Python Code:
kids = resp['numkdhh']
kids
pmf = thinkstats2.Pmf(kids)
thinkplot.Pmf(pmf, label='PMF')
thinkplot.Show(xlabel='# of Children', ylabel='PMF')
def BiasPmf(pmf, label=''):
    """Returns the Pmf with oversampling proportional to value.

    If pmf is the distribution of true values, the result is the
    distribution that would be seen if values are oversampled in
    proportion to their values; for example, if you ask students
    how big their classes are, large classes are oversampled in
    proportion to their size.

    Args:
        pmf: Pmf object.
        label: string label for the new Pmf.

    Returns:
        Pmf object
    """
    new_pmf = pmf.Copy(label=label)
    for x, p in pmf.Items():
        new_pmf.Mult(x, x)
    new_pmf.Normalize()
    return new_pmf
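# Inverse operation (my addition, same pattern as BiasPmf): downweight each
# value by 1/x to recover the unbiased distribution from a biased one.
def UnbiasPmf(pmf, label=''):
    new_pmf = pmf.Copy(label=label)
    for x, p in pmf.Items():
        if x > 0:  # a biased Pmf already assigns zero weight to x == 0
            new_pmf.Mult(x, 1.0 / x)
    new_pmf.Normalize()
    return new_pmf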
biasedpmf = BiasPmf(pmf, label='BiasPMF')
thinkplot.PrePlot(2)
thinkplot.Pmfs([pmf,biasedpmf])
thinkplot.Show(xlabel='# of Children', ylabel='PMF')
pmf.Mean()
biasedpmf.Mean()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Deviation form thermal
Step2: Software version
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
from qutip import *
import time
#number of states for each mode
N0=8
N1=8
N2=8
K=1.0
#damping rates
gamma0=0.1
gamma1=0.1
gamma2=0.4
alpha=sqrt(3)#initial coherent state param for mode 0
epsilon=0.5j #sqeezing parameter
tfinal=4.0
dt=0.05
tlist=arange(0.0,tfinal+dt,dt)
taulist=K*tlist #non-dimensional times
ntraj=100#number of trajectories
#define operators
a0=tensor(destroy(N0),qeye(N1),qeye(N2))
a1=tensor(qeye(N0),destroy(N1),qeye(N2))
a2=tensor(qeye(N0),qeye(N1),destroy(N2))
#number operators for each mode
num0=a0.dag()*a0
num1=a1.dag()*a1
num2=a2.dag()*a2
#dissipative operators for zero-temp. baths
C0=sqrt(2.0*gamma0)*a0
C1=sqrt(2.0*gamma1)*a1
C2=sqrt(2.0*gamma2)*a2
#initial state: coherent mode 0 & vacuum for modes #1 & #2
vacuum=tensor(basis(N0,0),basis(N1,0),basis(N2,0))
D=(alpha*a0.dag()-conj(alpha)*a0).expm()
psi0=D*vacuum
#trilinear Hamiltonian
H=1j*K*(a0*a1.dag()*a2.dag()-a0.dag()*a1*a2)
#evolve with the master-equation solver (a Monte-Carlo run via mcsolve is kept commented below)
start_time=time.time()
#avg=mcsolve(H,psi0,taulist,ntraj,[C0,C1,C2],[num0,num1,num2])
output=mesolve(H,psi0,taulist,[C0,C1,C2],[num0,num1,num2])
avg=output.expect
finish_time=time.time()
print('time elapsed = ',finish_time-start_time)
#plot expectation value for photon number in each mode
plot(taulist,avg[0],taulist,avg[1],taulist,avg[2])
xlabel("Time")
ylabel("Average number of particles")
legend(('Mode 0', 'Mode 1','Mode 2'));
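# Manley-Rowe check (my addition): the trilinear Hamiltonian alone conserves
# <n0> + <n1>; with the three zero-temperature baths the sum decays, which
# this plot makes visible.
plot(taulist, avg[0] + avg[1])
xlabel("Time")
ylabel("<n0> + <n1>")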
from qutip import *
from pylab import *
import time
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
#number of states for each mode
N0=6
N1=6
N2=6
#define operators
a0=tensor(destroy(N0),qeye(N1),qeye(N2))
a1=tensor(qeye(N0),destroy(N1),qeye(N2))
a2=tensor(qeye(N0),qeye(N1),destroy(N2))
#number operators for each mode
num0=a0.dag()*a0
num1=a1.dag()*a1
num2=a2.dag()*a2
#initial state: coherent mode 0 & vacuum for modes #1 & #2
alpha=sqrt(2)#initial coherent state param for mode 0
initial=tensor(coherent(N0,alpha),basis(N1,0),basis(N2,0))
psi0=initial
#trilinear Hamiltonian
H=1.0j*(a0*a1.dag()*a2.dag()-a0.dag()*a1*a2)
#run Monte-Carlo
tlist=linspace(0,2.5,50)
output=mcsolve(H,psi0,tlist,[],[],1)
mode1=[ptrace(k,1) for k in output.states]
diags1=[real(k.diag()) for k in mode1]
n1_avg=[expect(num1,k) for k in output.states]
thermal=[thermal_dm(N1,k).diag() for k in n1_avg]
colors=['m', 'g','orange','b', 'y','pink']
x=range(N1)
params = {'axes.labelsize': 14,'font.size': 14,'legend.fontsize': 12,
'xtick.labelsize': 14,'ytick.labelsize': 14}
rcParams.update(params)
fig = plt.figure(figsize=(8,6))
ax = Axes3D(fig)
for j in range(5):
ax.bar(x, diags1[10*j], zs=tlist[10*j], zdir='y',color=colors[j],linewidth=1.0,
alpha=0.6,align='center')
ax.plot(x,thermal[10*j],zs=tlist[10*j],zdir='y',color='r',linewidth=3,alpha=1)
ax.set_zlabel(r'Probability')
ax.set_xlabel(r'Number State')
ax.set_ylabel(r'Time')
ax.set_zlim3d(0, 1);
from qutip.ipynbtools import version_table
version_table()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Build a time series container with Datetime indices
Step2: Right now TimeBasedFCParameters only contains linear_trend_timewise, which performs a calculation of a linear trend, but using the time difference in hours between measurements in order to perform the linear regression. As always, you can add your own functions in tsfresh/feature_extraction/feature_calculators.py.
Step3: We extract the features as usual, specifying the column value, kind, and id.
Step4: The output looks exactly, like usual. If we compare it with the 'regular' linear_trend feature calculator, we can see that the intercept, p and R values are the same, as we'd expect – only the slope is now different.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from tsfresh.feature_extraction import extract_features
# TimeBasedFCParameters contains all functions that use the Datetime index of the timeseries container
from tsfresh.feature_extraction.settings import TimeBasedFCParameters
df = pd.DataFrame({"id": ["a", "a", "a", "a", "b", "b", "b", "b"],
"value": [1, 2, 3, 1, 3, 1, 0, 8],
"kind": ["temperature", "temperature", "pressure", "pressure",
"temperature", "temperature", "pressure", "pressure"]},
index=pd.DatetimeIndex(
['2019-03-01 10:04:00', '2019-03-01 10:50:00', '2019-03-02 00:00:00', '2019-03-02 09:04:59',
'2019-03-02 23:54:12', '2019-03-03 08:13:04', '2019-03-04 08:00:00', '2019-03-04 08:01:00']
))
df = df.sort_index()
df
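# A minimal illustration (my addition) of what linear_trend_timewise computes:
# regress the values of one series against elapsed hours taken from the
# DatetimeIndex rather than against the sample number. With only two samples
# per series in this toy frame, only the slope is meaningful.
from scipy.stats import linregress
sub = df[(df["id"] == "a") & (df["kind"] == "temperature")]
hours = (sub.index - sub.index[0]).total_seconds() / 3600.0
print(linregress(hours, sub["value"]))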
settings_time = TimeBasedFCParameters()
settings_time
X_tsfresh = extract_features(df, column_id="id", column_value='value', column_kind='kind',
default_fc_parameters=settings_time)
X_tsfresh.head()
settings_regular = {'linear_trend': [
{'attr': 'pvalue'},
{'attr': 'rvalue'},
{'attr': 'intercept'},
{'attr': 'slope'},
{'attr': 'stderr'}
]}
X_tsfresh = extract_features(df, column_id="id", column_value='value', column_kind='kind',
default_fc_parameters=settings_regular)
X_tsfresh.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Apple Stocks
Step2: Need to do better than O(n^2), so it'll probably be either O(n log n) or O(n). Let's try a greedy approach.
|
<ASSISTANT_TASK:>
Python Code:
from functools import reduce
def get_products_of_all_ints_except_at_index(arr):
results = []
if len(arr) < 2:
raise Exception("Arrays too short, can't do it")
for index, value in enumerate(arr):
new_array = arr[0:index] + arr[index+1:]
product = reduce((lambda x, y: x * y), new_array)
results.append(product)
return results
arr = [1, 7, 3, 4]
print(get_products_of_all_ints_except_at_index(arr))
# arr = [1]
# print(get_products_of_all_ints_except_at_index(arr))
arr = [1, 2]
print(get_products_of_all_ints_except_at_index(arr))
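# An O(n) alternative (my sketch, not part of the original exercise): build
# prefix products going forward, then multiply in suffix products going
# backward, avoiding the per-index reduce above.
def get_products_fast(arr):
    if len(arr) < 2:
        raise Exception("Arrays too short, can't do it")
    results = [1] * len(arr)
    prefix = 1
    for i in range(len(arr)):              # results[i] = product of arr[:i]
        results[i] = prefix
        prefix *= arr[i]
    suffix = 1
    for i in range(len(arr) - 1, -1, -1):  # fold in product of arr[i+1:]
        results[i] *= suffix
        suffix *= arr[i]
    return results

print(get_products_fast([1, 7, 3, 4]))  # [84, 12, 28, 21]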
from IPython.core.display import Image, display
from IPython.display import Image, Markdown
import random
def get_max_profit(stock_prices):
'''returns the best profit I could have made from 1 purchase and 1 sale of 1 Apple stock yesterday.
have to buy before selling
O(n^2) Solution
'''
max_profit = 0
for inner_index in range(len(stock_prices)):
for outer_index in range(len(stock_prices)):
earlier_time = min(inner_index, outer_index)
later_time = max(inner_index, outer_index)
# Get the earlier/later prices for correct ordering
earlier_price = stock_prices[earlier_time]
later_price = stock_prices[later_time]
potential_profit = later_price - earlier_price
max_profit = max(max_profit, potential_profit)
return max_profit
stock_prices_yesterday = []
print(get_max_profit(stock_prices_yesterday) == 0)
stock_prices_yesterday = [6]
print(get_max_profit(stock_prices_yesterday) == 0)
stock_prices_yesterday = [10, 7, 5, 8, 11, 9]
print(get_max_profit(stock_prices_yesterday) == 6)
def get_max_profit(stock_prices):
'''returns the best profit I could have made from 1 purchase and 1 sale of 1 Apple stock yesterday.
have to buy before selling
O(n^2) Solution
'''
max_profit = 0
for inner_index in range(len(stock_prices)):
for outer_index in range(len(stock_prices[inner_index:])):
earlier_time = min(inner_index, outer_index)
later_time = max(inner_index, outer_index)
# Get the earlier/later prices for correct ordering
earlier_price = stock_prices[earlier_time]
later_price = stock_prices[later_time]
potential_profit = later_price - earlier_price
max_profit = max(max_profit, potential_profit)
return max_profit
stock_prices_yesterday = []
print(get_max_profit(stock_prices_yesterday) == 0)
stock_prices_yesterday = [6]
print(get_max_profit(stock_prices_yesterday) == 0)
stock_prices_yesterday = [10, 7, 5, 8, 11, 9]
print(get_max_profit(stock_prices_yesterday) == 6) #incorrect
def get_max_profit(stock_prices):
'''returns the best profit I could have made from 1 purchase and 1 sale of 1 Apple stock yesterday.
have to buy before selling.
Came up with this on my own! Runtime is O(n)
'''
if len(stock_prices) < 2:
return 0
min_buy = stock_prices[0]
max_sell = stock_prices[1]
print(min_buy, max_sell)
for time in range(len(stock_prices)-1):
if time == 0:
continue
buy = stock_prices[time]
sell = stock_prices[time+1]
min_buy = min(min_buy, buy)
max_sell = max(max_sell, sell)
print(min_buy, max_sell)
return max_sell - min_buy
# stock_prices_yesterday = []
# print(get_max_profit(stock_prices_yesterday) == 0)
# stock_prices_yesterday = [6]
# print(get_max_profit(stock_prices_yesterday) == 0)
stock_prices_yesterday = [10, 7, 5, 8, 11, 9]
print(get_max_profit(stock_prices_yesterday))
# stock_prices_yesterday = [4, 3, 2, 1]
# print(get_max_profit(stock_prices_yesterday))
# stock_prices_yesterday = [4, 4, 4, 4]
# print(get_max_profit(stock_prices_yesterday))
def get_max_profit(stock_prices_yesterday):
# make sure we have at least 2 prices
if len(stock_prices_yesterday) < 2:
raise IndexError('Getting a profit requires at least 2 prices')
# we'll greedily update min_price and max_profit, so we initialize
# them to the first price and the first possible profit
min_price = stock_prices_yesterday[0]
max_profit = stock_prices_yesterday[1] - stock_prices_yesterday[0]
for index, current_price in enumerate(stock_prices_yesterday):
# skip the first (0th) time
# we can't sell at the first time, since we must buy first,
# and we can't buy and sell at the same time!
# if we took this out, we'd try to buy *and* sell at time 0.
# this would give a profit of 0, which is a problem if our
# max_profit is supposed to be *negative*--we'd return 0.
if index == 0:
continue
# see what our profit would be if we bought at the
# min price and sold at the current price
potential_profit = current_price - min_price
# update max_profit if we can do better
max_profit = max(max_profit, potential_profit)
# update min_price so it's always
# the lowest price we've seen so far
min_price = min(min_price, current_price)
return max_profit
stock_prices_yesterday = [10, 7, 5, 8, 11, 9]
print(get_max_profit(stock_prices_yesterday))
stock_prices_yesterday = [4, 3, 2, 1]
print(get_max_profit(stock_prices_yesterday))
stock_prices_yesterday = [4, 4, 4, 4]
print(get_max_profit(stock_prices_yesterday))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using pyplot's imshow to display the image
Step2: Displaying the image inside a bqplot Figure
Step3: Mixing with other marks
Step4: Its traits (attributes) will also respond dynamically to a change from the backend
|
<ASSISTANT_TASK:>
Python Code:
import os
import ipywidgets as widgets
import bqplot.pyplot as plt
from bqplot import LinearScale
image_path = os.path.abspath('../../data_files/trees.jpg')
with open(image_path, 'rb') as f:
raw_image = f.read()
ipyimage = widgets.Image(value=raw_image, format='jpg')
ipyimage
plt.figure(padding_y=0)
axes_options = {'x': {'visible': False}, 'y': {'visible': False}}
plt.imshow(image_path, 'filename')
plt.show()
fig = plt.figure(title='Trees', padding_x=0, padding_y=0)
image = plt.imshow(ipyimage, 'widget')
fig
fig = plt.figure(padding_x=0, padding_y=0)
plt.scales(scales={'x': LinearScale(min=-1, max=2),
'y': LinearScale(min=-0.5, max=2)})
image = plt.imshow(ipyimage, format='widget')
plt.plot([0, 1, 1, 0, 0], [0, 0, 1, 1, 0], 'r')
fig
# Full screen
image.x = [-1, 2]
image.y = [-.5, 2]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lambda is another way of defining a function
Step2: Why would I want something like this?
Step3: Operators to the rescure
Step4: Lets do some maths
Step5: Function returns Function
Step6: Partial Functions
Step7: debug("Error 404") = partial(logger, "debug")("Error 404")
Step8: Currying
Step9: Currying gets you specialized functions from more general functions
Step10: Benefits
Step11: Recursion
Step12: Sadly, no tail recursion
Step13: Generators
Step14: Pipelines
Step15: No Modularity
Step16: f(g(h(i(...x)))
|
<ASSISTANT_TASK:>
Python Code:
def mul(a, b):
return a*b
mul(2, 3)
mul = lambda a, b: a*b
mul(2, 3)
mul(mul(2, 3), 3)
def transform_and_add(func, a, b):
return func(a) + func(b)
transform_and_add(lambda x: x**2, 1, 2)
def square_and_add(a, b):
return (a**2 + b**2)
def cube_and_add(a, b):
return (a**3 + b**3)
def quad_and_add(a, b):
return (a**4 + b**4)
print(square_and_add(1, 2))
print(cube_and_add(1, 2))
print(quad_and_add(1, 2))
square = lambda x: x**2
cube = lambda x: x**3
quad = lambda x: x**4
print(square_and_add(1, 2) == transform_and_add(square, 1, 2))
print(cube_and_add(1, 2) == transform_and_add(cube, 1, 2))
print(quad_and_add(1, 2) == transform_and_add(quad, 1, 2))
def square_and_add(a, b):
return (a**2 + b**2)
def cube_and_mul(a, b):
return ((a**3) * (b**3))
def quad_and_div(a, b):
return ((a**4) / (b**4))
print(square_and_add(1, 2))
print(cube_and_mul(1, 2))
print(quad_and_div(1, 2))
def transform_and_reduce(func_transform, func_reduce, a, b):
return func_reduce(func_transform(a), func_transform(b))
print(square_and_add(1, 2) == transform_and_reduce(square, lambda x, y: x+y, 1, 2))
print(cube_and_mul(1, 2) == transform_and_reduce(cube, lambda x, y: x*y, 1, 2))
print(quad_and_div(1, 2) == transform_and_reduce(quad, lambda x, y: x/y, 1, 2))
import operator
print(square_and_add(1, 2) == transform_and_reduce(square, operator.add, 1, 2))
print(cube_and_mul(1, 2) == transform_and_reduce(cube, operator.mul, 1, 2))
print(quad_and_div(1, 2) == transform_and_reduce(quad, operator.truediv, 1, 2))
print(square_and_add(1, 2) == transform_and_reduce(lambda x: x**2, lambda x, y: x+y, 1, 2))
print(cube_and_mul(1, 2) == transform_and_reduce(lambda x: x**3, lambda x, y: x*y, 1, 2))
print(quad_and_div(1, 2) == transform_and_reduce(lambda x: x**4, lambda x, y: x/y, 1, 2))
from time import time
def timer(func):
def inner(*args, **kwargs):
t = time()
func(*args, **kwargs)
print("Time take = {time}".format(time = time() - t))
return inner
def echo_func(input):
print(input)
timed_echo = timer(echo_func)
timed_echo(1000000)
def logger(level, message):
print("{level}: {message}".format(level = level, message = message))
def debug(message):
return logger("debug", message)
def info(message):
    return logger("info", message)
debug("Error 404")
from functools import partial
debug = partial(logger, "debug")
info = partial(logger, "info")
debug("Error 404")
partial(logger, "debug")("Error 404")
def transform_and_add(func_transform, a, b):
return func_transform(a) + func_transform(b)
def curry_transform_and_add(func_transform):
def apply(a, b):
return func_transform(a) + func_transform(b)
return apply
print(transform_and_add(cube, 1, 2) == curry_transform_and_add(cube)(1, 2))
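# The same specialization via functools.partial (my addition): partially
# applying the transform argument is equivalent to the hand-rolled curry.
print(curry_transform_and_add(cube)(1, 2) == partial(transform_and_add, cube)(1, 2))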
input_list = [1, 2, 3, 4]
squared_list = map(lambda x: x**2, input_list)
print(type(squared_list))
print(next(squared_list))
print(next(squared_list))
from functools import reduce
sum_list = reduce(operator.add, input_list)
print(sum_list)
sum_squared_list = reduce(operator.add,
map(lambda x: x**2, input_list))
print(sum_squared_list)
even_list = list(
filter(lambda x: x%2==0, input_list))
sum_even_list = reduce(operator.add, even_list)
print(sum_even_list)
print(reduce(operator.add,
(map(lambda x: x**2,
filter(lambda x: x%2==0, input_list)))))
from itertools import accumulate
acc = accumulate(input_list, operator.add)
print(input_list)
print(type(acc))
print(next(acc))
print(next(acc))
print(next(acc))
def factorial(n):
if n == 0:
return 1
else:
return n * factorial(n - 1)
print(input_list)
collection = list()
is_even = lambda x: x%2==0
for data in input_list:
if(is_even(data)):
collection.append(data)
else:
collection.append(data*2)
print(collection)
collection = [data if is_even(data) else data*2
for data in input_list]
print(collection)
collection = (data if is_even(data) else data*2
for data in input_list)
print(collection)
def pipeline_each(data, fns):
return reduce(lambda a, x: map(x, a),
fns,
data)
import re
strings_to_clean = ["apple https://www.apple.com/",
"google https://www.google.com/",
"facebook https://www.facebook.com/"]
def format_string(input_string):
return re.sub(r"http\S+", "", input_string).strip().title()
for _str in map(format_string, strings_to_clean):
print(_str)
import re
def remove_url(input_string):
return re.sub(r"http\S+", "", input_string).strip()
def title_case(input_string):
return input_string.title()
def format_string(input_string):
return title_case(remove_url(input_string))
for _str in map(format_string, strings_to_clean):
print(_str)
import re
for _str in pipeline_each(strings_to_clean, [remove_url,
title_case]):
print(_str)
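# A generic compose helper (my addition), matching the f(g(h(...x))) idea in
# the description: reduce threads the value through the functions right-to-left.
def compose(*fns):
    return lambda x: reduce(lambda acc, f: f(acc), reversed(fns), x)
format_string_composed = compose(title_case, remove_url)
print(format_string_composed("apple https://www.apple.com/"))  # Apple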
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def unixTimeToHumanReadable(seconds):
    ans = ""
    daysOfMonth = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    # Split the epoch seconds into whole days and the remainder of the day
    daysTillNow = seconds // (24 * 60 * 60)
    extraTime = seconds % (24 * 60 * 60)
    currYear = 1970
    # Walk forward one year at a time, accounting for leap years
    while daysTillNow >= 365:
        if currYear % 400 == 0 or (currYear % 4 == 0 and currYear % 100 != 0):
            daysTillNow -= 366
        else:
            daysTillNow -= 365
        currYear += 1
    # Remaining days inside the current year (1-based)
    extraDays = daysTillNow + 1
    flag = 0
    if currYear % 400 == 0 or (currYear % 4 == 0 and currYear % 100 != 0):
        flag = 1
    # Walk through the months of the current year
    month = 0
    index = 0
    if flag == 1:
        while True:
            if index == 1:
                if extraDays - 29 < 0:
                    break
                month += 1
                extraDays -= 29
            else:
                if extraDays - daysOfMonth[index] < 0:
                    break
                month += 1
                extraDays -= daysOfMonth[index]
            index += 1
    else:
        while True:
            if extraDays - daysOfMonth[index] < 0:
                break
            month += 1
            extraDays -= daysOfMonth[index]
            index += 1
    if extraDays > 0:
        month += 1
        date = extraDays
    else:
        if month == 2 and flag == 1:
            date = 29
        else:
            date = daysOfMonth[month - 1]
    hours = extraTime // 3600
    minutes = (extraTime % 3600) // 60
    secondss = (extraTime % 3600) % 60
    ans += str(date)
    ans += "/"
    ans += str(month)
    ans += "/"
    ans += str(currYear)
    ans += " "
    ans += str(hours)
    ans += ":"
    ans += str(minutes)
    ans += ":"
    ans += str(secondss)
    return ans

if __name__ == "__main__":
    T = 1595497956
    ans = unixTimeToHumanReadable(T)
    print(ans)
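    # Sanity check (my addition): the standard library applies the same UTC
    # epoch conversion, so its components should match the string above.
    from datetime import datetime, timezone
    print(datetime.fromtimestamp(T, tz=timezone.utc))  # 2020-07-23 09:52:36+00:00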
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The frame represented by video 98, frame 1 is shown here
Step2: Try it!
Step3: Build the training set
Step4: The training data in training is an object of class WordsData defined in the asl_data module. in addition to the words list, data can be accessed with the get_all_sequences, get_all_Xlengths, get_word_sequences, and get_word_Xlengths methods. We need the get_word_Xlengths method to train multiple sequences with the hmmlearn library. In the following example, notice that there are two lists; the first is a concatenation of all the sequences(the X portion) and the second is a list of the sequence lengths(the Lengths portion).
Step5: More feature sets
Step6: To select a mean that matches by speaker, use the pandas map method
Step7: Try it!
Step8: <a id='part1_submission'></a>
Step9: Question 1
Step10: <a id='part2_tutorial'></a>
Step11: The HMM model has been trained and information can be pulled from the model, including means and variances for each feature and hidden state. The log likelihood for any individual sample or group of samples can also be calculated with the score method.
Step12: Try it!
Step14: Visualize the hidden states
Step15: ModelSelector class
Step16: Cross-validation folds
Step17: Tip
Step18: Question 2
Step19: <a id='part3_tutorial'></a>
Step20: Load the test set
Step21: <a id='part3_submission'></a>
Step22: Question 3
Step23: <a id='part4_info'></a>
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from asl_data import AslDb
asl = AslDb() # initializes the database
asl.df.head() # displays the first five rows of the asl database, indexed by video and frame
asl.df.loc[98,1] # look at the data available for an individual frame
asl.df['grnd-ry'] = asl.df['right-y'] - asl.df['nose-y']
asl.df.head() # the new feature 'grnd-ry' is now in the frames dictionary
from asl_utils import test_features_tryit
# TODO add df columns for 'grnd-rx', 'grnd-ly', 'grnd-lx' representing differences between hand and nose locations
asl.df['grnd-rx'] = asl.df['right-x'] - asl.df['nose-x']
asl.df['grnd-ly'] = asl.df['left-y'] - asl.df['nose-y']
asl.df['grnd-lx'] = asl.df['left-x'] - asl.df['nose-x']
# test the code
test_features_tryit(asl)
# collect the features into a list
features_ground = ['grnd-rx','grnd-ry','grnd-lx','grnd-ly']
#show a single set of features for a given (video, frame) tuple
[asl.df.ix[98,1][v] for v in features_ground]
training = asl.build_training(features_ground)
print("Training words: {}".format(training.words))
training.get_word_Xlengths('CHOCOLATE')
df_means = asl.df.groupby('speaker').mean()
df_means
asl.df['left-x-mean']= asl.df['speaker'].map(df_means['left-x'])
asl.df.head()
from asl_utils import test_std_tryit
# TODO Create a dataframe named `df_std` with standard deviations grouped by speaker
df_std = asl.df.groupby('speaker').std()
# test the code
test_std_tryit(df_std)
# TODO add features for normalized by speaker values of left, right, x, y
# Name these 'norm-rx', 'norm-ry', 'norm-lx', and 'norm-ly'
# using Z-score scaling (X-Xmean)/Xstd
features_norm = ['norm-rx', 'norm-ry', 'norm-lx','norm-ly']
# Mean matched by speaker
asl.df['right-x-mean']= asl.df['speaker'].map(df_means['right-x'])
asl.df['right-y-mean']= asl.df['speaker'].map(df_means['right-y'])
asl.df['left-x-mean']= asl.df['speaker'].map(df_means['left-x'])
asl.df['left-y-mean']= asl.df['speaker'].map(df_means['left-y'])
# Std dev matched by speaker
asl.df['right-x-std']= asl.df['speaker'].map(df_std['right-x'])
asl.df['right-y-std']= asl.df['speaker'].map(df_std['right-y'])
asl.df['left-x-std']= asl.df['speaker'].map(df_std['left-x'])
asl.df['left-y-std']= asl.df['speaker'].map(df_std['left-y'])
# Add the actual normalized scores
asl.df['norm-rx'] = (asl.df['right-x'] - asl.df['right-x-mean']) / asl.df['right-x-std']
asl.df['norm-ry'] = (asl.df['right-y'] - asl.df['right-y-mean']) / asl.df['right-y-std']
asl.df['norm-lx'] = (asl.df['left-x'] - asl.df['left-x-mean']) / asl.df['left-x-std']
asl.df['norm-ly'] = (asl.df['left-y'] - asl.df['left-y-mean']) / asl.df['left-y-std']
# TODO add features for polar coordinate values where the nose is the origin
# Name these 'polar-rr', 'polar-rtheta', 'polar-lr', and 'polar-ltheta'
# Note that 'polar-rr' and 'polar-rtheta' refer to the radius and angle
'''
Calculate polar coordinates with Cartesian-to-polar equations.
Use the np.arctan2 function and swap the x and y axes to move the 0-to-2π discontinuity
to 12 o'clock instead of 3 o'clock; in other words, the normal break in the radians value from 0 to 2π
occurs directly to the left of the speaker's nose, which may be in the signing area and interfere with results.
By swapping the x and y axes, that discontinuity moves to directly above the speaker's head, an area not generally
used in signing.
'''
features_polar = ['polar-rr', 'polar-rtheta', 'polar-lr', 'polar-ltheta']
asl.df['polar-rr'] = np.sqrt(asl.df['grnd-rx']**2 + asl.df['grnd-ry']**2)
asl.df['polar-rtheta'] = np.arctan2(asl.df['grnd-rx'], asl.df['grnd-ry'])
asl.df['polar-lr'] = np.sqrt(asl.df['grnd-lx']**2 + asl.df['grnd-ly']**2)
asl.df['polar-ltheta'] = np.arctan2(asl.df['grnd-lx'], asl.df['grnd-ly'])
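# (Added sanity check, not from the original notebook) With the swapped arguments,
# a point straight above the nose (grnd-x = 0, grnd-y < 0 in image coordinates)
# lands on the branch cut: np.arctan2(0, -1) == np.pi, so the -pi/+pi
# discontinuity sits above the head rather than beside the nose.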
# TODO add features for left, right, x, y differences by one time step, i.e. the "delta" values discussed in the lecture
# Name these 'delta-rx', 'delta-ry', 'delta-lx', and 'delta-ly'
features_delta = ['delta-rx', 'delta-ry', 'delta-lx', 'delta-ly']
asl.df['delta-rx'] = asl.df['grnd-rx'].diff()
asl.df['delta-ry'] = asl.df['grnd-ry'].diff()
asl.df['delta-lx'] = asl.df['grnd-lx'].diff()
asl.df['delta-ly'] = asl.df['grnd-ly'].diff()
# Fill with 0 values
asl.df = asl.df.fillna(0)
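# (Added note) .diff() leaves NaN in the very first row and also takes differences
# across video boundaries in this flat DataFrame; the fillna(0) above covers the NaNs.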
# TODO add features of your own design, which may be a combination of the above or something else
# Name these whatever you would like
# TODO define a list named 'features_custom' for building the training set
# Normalize polar coordinates
features_polar_norm = ['pnorm-rx', 'pnorm-ry', 'pnorm-lx','pnorm-ly']
df_means = asl.df.groupby('speaker').mean()
df_std = asl.df.groupby('speaker').std()
# Mean matched by speaker
asl.df['polar-rr-mean']= asl.df['speaker'].map(df_means['polar-rr'])
asl.df['polar-rtheta-mean']= asl.df['speaker'].map(df_means['polar-rtheta'])
asl.df['polar-lr-mean']= asl.df['speaker'].map(df_means['polar-lr'])
asl.df['polar-ltheta-mean']= asl.df['speaker'].map(df_means['polar-ltheta'])
# Std dev matched by speaker
asl.df['polar-rr-std']= asl.df['speaker'].map(df_std['polar-rr'])
asl.df['polar-rtheta-std']= asl.df['speaker'].map(df_std['polar-rtheta'])
asl.df['polar-lr-std']= asl.df['speaker'].map(df_std['polar-lr'])
asl.df['polar-ltheta-std']= asl.df['speaker'].map(df_std['polar-ltheta'])
# Add the actual normalized scores
asl.df['pnorm-rx'] = (asl.df['polar-rr'] - asl.df['polar-rr-mean']) / asl.df['polar-rr-std']
asl.df['pnorm-ry'] = (asl.df['polar-rtheta'] - asl.df['polar-rtheta-mean']) / asl.df['polar-rtheta-std']
asl.df['pnorm-lx'] = (asl.df['polar-lr'] - asl.df['polar-lr-mean']) / asl.df['polar-lr-std']
asl.df['pnorm-ly'] = (asl.df['polar-ltheta'] - asl.df['polar-ltheta-mean']) / asl.df['polar-ltheta-std']
import unittest
# import numpy as np
class TestFeatures(unittest.TestCase):
    def test_features_ground(self):
        sample = (asl.df.ix[98, 1][features_ground]).tolist()
        self.assertEqual(sample, [9, 113, -12, 119])
    def test_features_norm(self):
        sample = (asl.df.ix[98, 1][features_norm]).tolist()
        np.testing.assert_almost_equal(sample, [1.153, 1.663, -0.891, 0.742], 3)
    def test_features_polar(self):
        sample = (asl.df.ix[98, 1][features_polar]).tolist()
        np.testing.assert_almost_equal(sample, [113.3578, 0.0794, 119.603, -0.1005], 3)
    def test_features_delta(self):
        sample = (asl.df.ix[98, 0][features_delta]).tolist()
        self.assertEqual(sample, [0, 0, 0, 0])
        sample = (asl.df.ix[98, 18][features_delta]).tolist()
        self.assertTrue(sample in [[-16, -5, -2, 4], [-14, -9, 0, 0]], "Sample value found was {}".format(sample))
suite = unittest.TestLoader().loadTestsFromModule(TestFeatures())
unittest.TextTestRunner().run(suite)
import warnings
from hmmlearn.hmm import GaussianHMM
def train_a_word(word, num_hidden_states, features):
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    training = asl.build_training(features)
    X, lengths = training.get_word_Xlengths(word)
    model = GaussianHMM(n_components=num_hidden_states, n_iter=1000).fit(X, lengths)
    logL = model.score(X, lengths)
    return model, logL
demoword = 'BOOK'
model, logL = train_a_word(demoword, 3, features_ground)
print("Number of states trained in model for {} is {}".format(demoword, model.n_components))
print("logL = {}".format(logL))
def show_model_stats(word, model):
    print("Number of states trained in model for {} is {}".format(word, model.n_components))
    variance = np.array([np.diag(model.covars_[i]) for i in range(model.n_components)])
    for i in range(model.n_components):  # for each hidden state
        print("hidden state #{}".format(i))
        print("mean = ", model.means_[i])
        print("variance = ", variance[i])
        print()
show_model_stats(demoword, model)
my_testword = 'CHOCOLATE'
model, logL = train_a_word(my_testword, 3, features_ground) # Experiment here with different parameters
show_model_stats(my_testword, model)
print("logL = {}".format(logL))
%matplotlib inline
import math
from matplotlib import (cm, pyplot as plt, mlab)
def visualize(word, model):
    """ visualize the input model for a particular word """
    variance = np.array([np.diag(model.covars_[i]) for i in range(model.n_components)])
    figures = []
    for parm_idx in range(len(model.means_[0])):
        xmin = int(min(model.means_[:, parm_idx]) - max(variance[:, parm_idx]))
        xmax = int(max(model.means_[:, parm_idx]) + max(variance[:, parm_idx]))
        fig, axs = plt.subplots(model.n_components, sharex=True, sharey=False)
        colours = cm.rainbow(np.linspace(0, 1, model.n_components))
        for i, (ax, colour) in enumerate(zip(axs, colours)):
            x = np.linspace(xmin, xmax, 100)
            mu = model.means_[i, parm_idx]
            sigma = math.sqrt(np.diag(model.covars_[i])[parm_idx])
            ax.plot(x, mlab.normpdf(x, mu, sigma), c=colour)
            ax.set_title("{} feature {} hidden state #{}".format(word, parm_idx, i))
            ax.grid(True)
        figures.append(plt)
    for p in figures:
        p.show()
visualize(my_testword, model)
from my_model_selectors import SelectorConstant
training = asl.build_training(features_ground) # Experiment here with different feature sets defined in part 1
word = 'VEGETABLE' # Experiment here with different words
model = SelectorConstant(training.get_all_sequences(), training.get_all_Xlengths(), word, n_constant=3).select()
print("Number of states trained in model for {} is {}".format(word, model.n_components))
from sklearn.model_selection import KFold
training = asl.build_training(features_ground) # Experiment here with different feature sets
word = 'VEGETABLE' # Experiment here with different words
word_sequences = training.get_word_sequences(word)
split_method = KFold()
for cv_train_idx, cv_test_idx in split_method.split(word_sequences):
    print("Train fold indices:{} Test fold indices:{}".format(cv_train_idx, cv_test_idx))  # view indices of the folds
words_to_train = ['FISH', 'BOOK', 'VEGETABLE', 'FUTURE', 'JOHN']
import timeit
# TODO: Implement SelectorCV in my_model_selector.py
%load_ext autoreload
%autoreload 2
from my_model_selectors import SelectorCV
training = asl.build_training(features_ground) # Experiment here with different feature sets defined in part 1
sequences = training.get_all_sequences()
Xlengths = training.get_all_Xlengths()
for word in words_to_train:
    start = timeit.default_timer()
    model = SelectorCV(sequences, Xlengths, word,
                       min_n_components=2, max_n_components=15, random_state=14).select()
    end = timeit.default_timer() - start
    if model is not None:
        print("Training complete for {} with {} states with time {} seconds".format(word, model.n_components, end))
    else:
        print("Training failed for {}".format(word))
# TODO: Implement SelectorBIC in module my_model_selectors.py
%load_ext autoreload
%autoreload 2
from my_model_selectors import SelectorBIC
training = asl.build_training(features_ground) # Experiment here with different feature sets defined in part 1
sequences = training.get_all_sequences()
Xlengths = training.get_all_Xlengths()
for word in words_to_train:
    start = timeit.default_timer()
    model = SelectorBIC(sequences, Xlengths, word,
                        min_n_components=2, max_n_components=15, random_state=14).select()
    end = timeit.default_timer() - start
    if model is not None:
        print("Training complete for {} with {} states with time {} seconds".format(word, model.n_components, end))
    else:
        print("Training failed for {}".format(word))
# TODO: Implement SelectorDIC in module my_model_selectors.py
%load_ext autoreload
%autoreload 2
from my_model_selectors import SelectorDIC
training = asl.build_training(features_ground) # Experiment here with different feature sets defined in part 1
sequences = training.get_all_sequences()
Xlengths = training.get_all_Xlengths()
for word in words_to_train:
    start = timeit.default_timer()
    model = SelectorDIC(sequences, Xlengths, word,
                        min_n_components=2, max_n_components=15, random_state=14).select()
    end = timeit.default_timer() - start
    if model is not None:
        print("Training complete for {} with {} states with time {} seconds".format(word, model.n_components, end))
    else:
        print("Training failed for {}".format(word))
from asl_test_model_selectors import TestSelectors
suite = unittest.TestLoader().loadTestsFromModule(TestSelectors())
unittest.TextTestRunner().run(suite)
# autoreload for automatically reloading changes made in my_model_selectors and my_recognizer
%load_ext autoreload
%autoreload 2
from my_model_selectors import SelectorConstant
def train_all_words(features, model_selector):
    training = asl.build_training(features)  # Experiment here with different feature sets defined in part 1
    sequences = training.get_all_sequences()
    Xlengths = training.get_all_Xlengths()
    model_dict = {}
    for word in training.words:
        model = model_selector(sequences, Xlengths, word,
                               n_constant=3).select()
        model_dict[word] = model
    return model_dict
models = train_all_words(features_ground, SelectorConstant)
print("Number of word models returned = {}".format(len(models)))
test_set = asl.build_test(features_ground)
print("Number of test set items: {}".format(test_set.num_items))
print("Number of test set sentences: {}".format(len(test_set.sentences_index)))
# TODO implement the recognize method in my_recognizer
%load_ext autoreload
%autoreload 2
from my_recognizer import recognize
from asl_utils import show_errors
# TODO Choose a feature set and model selector
features = features_ground
model_selector = SelectorCV
# TODO Recognize the test set and display the result with the show_errors method
models = train_all_words(features, model_selector)
test_set = asl.build_test(features)
probabilities, guesses = recognize(models, test_set)
show_errors(guesses, test_set)
# TODO Choose a feature set and model selector
features = features_polar
model_selector = SelectorBIC
# TODO Recognize the test set and display the result with the show_errors method
models = train_all_words(features, model_selector)
test_set = asl.build_test(features)
probabilities, guesses = recognize(models, test_set)
show_errors(guesses, test_set)
# TODO Choose a feature set and model selector
features = features_polar
model_selector = SelectorDIC
# TODO Recognize the test set and display the result with the show_errors method
models = train_all_words(features, model_selector)
test_set = asl.build_test(features)
probabilities, guesses = recognize(models, test_set)
show_errors(guesses, test_set)
from asl_test_recognizer import TestRecognize
suite = unittest.TestLoader().loadTestsFromModule(TestRecognize())
unittest.TextTestRunner().run(suite)
# create a DataFrame of log likelihoods for the test word items
df_probs = pd.DataFrame(data=probabilities)
df_probs.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Afterwards, we created a function to calculate each of the elements of matrix H, the Butterworth filter.
Step2: Below we have a function to create matrix H, which defines a Butterworth filter for an image of (w,h) dimensions
Step3: As for matrix C, each of its rows has m*n elements corresponding to the exponential term in the Fourier transform. Each line corresponds to a (u,v) pair of the H_hat vector, which has N² elements and iterates through them
Step4: The following function calculates each of the (u,v) lines of the matrix C
Step5: Then, the function below applies the Butterworth filter to an image of dimensions (480,640) and creates a matrix H of this filter with the same dimensions
Step6: Now we create the smaller kernel by applying the pseudo-inverse of matrix C to the matrix H
Step7: Execute all the functions
Step8: Second part
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import cv2
import matplotlib.pyplot as plt
#image is height: 480, width: 640
#M:u:x:col:width
#N:v:y:row:height
#Calculate (u,v) distance from center of image
def getDValue(u, v, w, h):
    return np.sqrt((u - (w / 2.0))**2 + (v - (h / 2.0))**2)
#Calculate element of Butterworth filter in matrix H given (u,v) pair,
#cut-off frequency d0, order term n, width w and height h
def getHValue(u, v, d0, n, w, h):
    denom = 1.0 + ((getDValue(u, v, w, h) / d0)**(2 * n))
    return 1.0 / denom
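# (Added note) This is the standard low-pass Butterworth transfer function,
# H(u,v) = 1 / (1 + (D(u,v)/d0)^(2n)), where D(u,v) is the distance from the
# center of the spectrum.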
#Create a Butterworth filter as matrix H given image dimensions (w,h), cutoff frequency d0, order n, width w and
#height h. Calculate matrix H following openCV's standards, that is, with shape (h,w,2).
#Each cell has 2 float values, the real part and the imaginary part
def makeMatrixHCV2(w, h, d0, n):
    matH = np.ones((h, w, 2), dtype=np.float32)
    for i in range(h):
        for j in range(w):
            matH[i, j] = getHValue(i, j, d0, n, w, h)
    return matH
#Calculate a row for the matrix C
#Matrix C has format N^2 x n^2.
#For example, if we want a kernel ^H with shape 3x3 to approximate the Butterworth filter H with shape (N=480, M=640), we will have matrix C with shape (N^2, n^2)
#So, every line of matrix C has n^2 elements for each (u,v) pair of matrix ^H
def getCRow(M, N, m, n, u, v):
    return [np.exp(-2j*np.pi*(((x*u)/M) + ((y*v)/N))) for x in xrange(m) for y in xrange(n)]
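# (Added sanity check) For u = v = 0 every exponent is zero, so the first row of C
# is all ones and just sums the kernel coefficients:
# getCRow(640, 480, 3, 3, 0, 0) == [1 + 0j] * 9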
#Build matrix C by calculating its lines for each (u,v) pair
def makeMatrixC(M, N, m, n):
    matC = list()
    for u in xrange(N):
        for v in xrange(N):
            matC.append(getCRow(M, N, m, n, u, v))
    return np.array(matC, dtype=np.complex)
#Apply Butterworth filter on an image with dimensions (h=480, w=640)
#Follows openCV's standard in that every matrix element is double valued, with a real part and an imaginary part
def calcCV2():
    img = cv2.imread("Lenna.png", 0)
    img_dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
    img_shift = np.fft.fftshift(img_dft)
    matH = makeMatrixHCV2(640, 480, 10, 1)  # d0={1,10}, n={1,4}
    img_shift_m = img_shift * matH
    img_ishift = np.fft.ifftshift(img_shift_m)
    img_back = cv2.idft(img_ishift)
    img_back = cv2.magnitude(img_back[:, :, 0], img_back[:, :, 1])
    plt.imshow(img_back, cmap="gray")
    plt.show()
    #return matH
def createNewKernel(m, n):
    matH = makeMatrixHCV2(640, 480, 10, 1)
    matC = makeMatrixC(640, 480, m, n)
    matH_f = matH.flatten()
    matH_f = matH_f[:230400]
    matH_f = matH_f.reshape((-1, 1))
    matC_t = np.linalg.pinv(matC)
    vec_h = matC_t.dot(matH_f)
    mat_h = vec_h.reshape((3, 3))
    img = cv2.imread("Lenna.png", 0)
    img2 = cv2.filter2D(np.float32(img), -1, np.abs(mat_h))
    plt.imshow(img2, cmap="gray")
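# (Added note) np.linalg.pinv gives the Moore-Penrose pseudo-inverse, so
# vec_h = pinv(C).dot(H_f) is the least-squares solution of C h ≈ H_f: the m*n
# kernel whose DFT best matches the sampled Butterworth filter in the L2 sense.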
calcCV2()
createNewKernel(3,3)
%matplotlib inline
from matplotlib import pyplot as plt
img = cv2.imread('for1.png', 0)
plt.imshow(img, cmap = 'gray')
plt.show()
img1 = cv2.imread('for2.png', 0)
plt.imshow(img1, cmap = 'gray')
plt.show()
import os
import cv2
import numpy as np
imagesList = list() # all images read from input folder (50)
INPUT_PATH = "./images/" # input folder
OUTPUT_PATH = "./results/" # output folder
# a function to display an image to screen and save it to a file
def showImageAndSaveToFile(img, output):
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', img)
    cv2.waitKey(0) and cv2.destroyAllWindows()
    cv2.imwrite(output, img)
for fileName in os.listdir(INPUT_PATH):  # read all images in input folder
    imagesList.append(cv2.imread(INPUT_PATH + fileName, cv2.IMREAD_GRAYSCALE))
xSize, ySize = imagesList[0].shape # get their shape
imgSum = np.zeros((xSize, ySize), dtype=np.uint16) # temporary matrix to store the sum of various uint8 matrixes (the images)
imgAvg = np.zeros((xSize, ySize), dtype=np.uint8) # matrix of the average of all the images read
for img in imagesList:
    imgSum = np.add(imgSum, img)
imgSum = np.divide(imgSum,len(imagesList)) # get average
imgAvg = imgSum.astype(np.uint8) # convert to uint8, expected by OpenCv
showImageAndSaveToFile(imgAvg, OUTPUT_PATH + "imgAvg.jpeg") # show the average image
imgBuff = np.zeros((xSize, ySize), dtype=np.float64) # temporary matrix to receive the computation of average noise image
imgNes = np.zeros((xSize, ySize), dtype=np.uint8) # matrix to output the computation done in the temporary matrix
for img in imagesList:  # computing average noise
    imgBuff = np.add(imgBuff, np.subtract(imgAvg, img)**2)
imgBuff = np.square(np.divide(imgBuff, len(imagesList)))
imgNes = imgBuff.astype(np.uint8) # convert to uint8, expected by OpenCv
showImageAndSaveToFile(imgNes, OUTPUT_PATH + "imgNoiseAvg.jpeg") # show the average noise image
avgNoise = (np.sum(imgNes))/(xSize*ySize) # average noise escalar
print(avgNoise)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'noaa-gfdl', 'gfdl-cm4', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'test-institute-1', 'sandbox-2', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We plot both diameters and the puller speed on the same chart
Step2: With this third approach the data have been stabilized and the standard deviation reduced; however, the mean filament diameter and the traction speed have decreased as well.
Step3: Data filtering
Step4: X/Y scatter plot
Step5: We analyze the ratio data
Step6: Quality limits
|
<ASSISTANT_TASK:>
Python Code:
# Import the libraries used
import numpy as np
import pandas as pd
import seaborn as sns
# Show the version of each library used
print("Numpy v{}".format(np.__version__))
print("Pandas v{}".format(pd.__version__))
print("Seaborn v{}".format(sns.__version__))
# Open the csv file with the sample data
datos = pd.read_csv('ensayo2.CSV')
%pylab inline
# Store in a list the file columns we will work with
columns = ['Diametro X','Diametro Y', 'RPM TRAC']
# Show a summary of the data obtained
datos[columns].describe()
#datos.describe().loc['mean',['Diametro X [mm]', 'Diametro Y [mm]']]
datos.loc[:, "Diametro X":"Diametro Y"].plot(figsize=(16,10),ylim=(0.5,3)).hlines([1.85,1.65],0,3500,colors='r')
#datos['RPM TRAC'].plot(secondary_y='RPM TRAC')
datos.loc[:, "Diametro X":"Diametro Y"].boxplot(return_type='axes')
plt.scatter(x=datos['Diametro X'], y=datos['Diametro Y'], marker='.')
datos_filtrados = datos[(datos['Diametro X'] >= 0.9) & (datos['Diametro Y'] >= 0.9)]
#datos_filtrados.ix[:, "Diametro X":"Diametro Y"].boxplot(return_type='axes')
plt.scatter(x=datos_filtrados['Diametro X'], y=datos_filtrados['Diametro Y'], marker='.')
ratio = datos_filtrados['Diametro X']/datos_filtrados['Diametro Y']
ratio.describe()
rolling_mean = ratio.rolling(50).mean()
rolling_std = ratio.rolling(50).std()
rolling_mean.plot(figsize=(12,6))
# plt.fill_between(ratio, y1=rolling_mean+rolling_std, y2=rolling_mean-rolling_std, alpha=0.5)
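# A working version of the commented-out band above (a minimal sketch): the
# first argument of fill_between must be the x axis (the series index), not
# the series itself.
plt.fill_between(rolling_mean.index, rolling_mean - rolling_std,
                 rolling_mean + rolling_std, alpha=0.3)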
ratio.plot(figsize=(12,6), alpha=0.6, ylim=(0.5,1.5))
Th_u = 1.85
Th_d = 1.65
data_violations = datos[(datos['Diametro X'] > Th_u) | (datos['Diametro X'] < Th_d) |
(datos['Diametro Y'] > Th_u) | (datos['Diametro Y'] < Th_d)]
data_violations.describe()
data_violations.plot(subplots=True, figsize=(12,12))
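# A small added check: what fraction of the samples fall outside the quality band?
print("Out-of-spec fraction: {:.2%}".format(float(len(data_violations)) / len(datos)))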
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lambda functions are just anonymous functions: they don't need to be defined as named functions (with def) before being used. This makes them useful because they can really help with code readability (if used appropriately). Their true power comes from their expressiveness when performing inline operations.
Step2: square every number that is divisible by 2 from 1 to 20
Step3: That’s a little abstract at this point...
|
<ASSISTANT_TASK:>
Python Code:
x = range(10)
x
[item**2 for item in x]
def square(num):
return num**2
list(map(square, x))
square_lamb = lambda num: num**2
list(map(square_lamb, x))
list(map(lambda num: num**2, x))
[item**2 for item in range(1,20) if item % 2 == 0]
list(map(lambda z: z**2, filter(lambda z: z % 2 == 0, range(1,20))))
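# A further illustration (added, not from the original lesson): lambdas are
# handy as inline sort keys, where defining a named function would be overkill.
words = ["pear", "fig", "banana", "kiwi"]
sorted(words, key=lambda w: len(w))  # -> ['fig', 'pear', 'kiwi', 'banana']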
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import Python libraries
Step2: Quick guide (tldr;)
Step3: Full guide
Step4: Here we can just load the vcf file directly into the pca analysis module. Then ask for the samples in samples_vcforder, which is the order in which they are written in the vcf.
Step5: Now construct the default plot, which shows all samples and PCs 1 and 2. By default all samples are assigned to one population, so everything will be the same color.
Step6: Population assignment for sample colors
Step7: This is just much nicer looking now, and it's also much more straightforward to interpret.
Step8: Inspecting PCs directly
Step9: Looking at PCs other than 1 & 2
Step10: It's nice to see PCs 1-4 here, but it's kind of stupid to plot the legend twice, so we can just turn off the legend on the first plot.
Step11: Controlling colors
Step12: Dealing with missing data
Step13: This is useful, but it doesn't give us a clear direction for how to go about dealing with the missingness. One way to reduce missing data is to reduce the tolerance for samples ungenotyped at a snp. The other way to reduce missing data is to remove samples with very poor sequencing. To this end, the .missingness() function will show a table of number of retained snps for various of these conditions.
Step14: Here the columns indicate progressive removal of the samples with the fewest number of snps. So "Full" indicates retention of all samples. "2E_0" shows # snps after removing this sample (as it has the most missing data). "2F_0" shows the # snps after removing both this sample & "2E_0". And so on. You can see as we move from left to right the total number of snps goes down, but also so does the amount of missingness.
Step15: You can see that this also has the effect of reducing the amount of missingness per sample.
Step16: NB
Step17: Imputing missing genotypes
Step18: In comparing this missingness matrix with the previous one, you can see that indeed some snps are being recovered (though not many, again because of the clean simulated data).
Step19: Dealing with unequal sampling
Step20: Dealing with linked snps
|
<ASSISTANT_TASK:>
Python Code:
## conda install ipyrad -c ipyrad
## conda install -c conda-forge scikit-allel
%matplotlib inline
import ipyrad
import ipyrad.analysis as ipa ## ipyrad analysis toolkit
## Load your assembly
data = ipyrad.load_json("/tmp/ipyrad-test/rad.json")
## Create they pca object
pca = ipa.pca(data)
## Bam!
pca.plot()
## Path to the input vcf, in this case it's just the vcf from our ipyrad pedicularis assembly
vcffile = "/home/isaac/ipyrad/test-data/pedicularis/ped_outfiles/ped.vcf"
pca = ipa.pca(vcffile)
print(pca.samples_vcforder)
pca.plot()
pops_dict = {
"superba":["29154_superba_SRR1754715"],
"thamno":["30556_thamno_SRR1754720", "33413_thamno_SRR1754728"],
"cyathophylla":["30686_cyathophylla_SRR1754730"],
"przewalskii":["32082_przewalskii_SRR1754729", "33588_przewalskii_SRR1754727"],
"rex":["35236_rex_SRR1754731", "35855_rex_SRR1754726", "38362_rex_SRR1754725",\
"39618_rex_SRR1754723", "40578_rex_SRR1754724"],
"cyathophylloides":["41478_cyathophylloides_SRR1754722", "41954_cyathophylloides_SRR1754721"]
}
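## Optional sanity check (an added sketch, not part of the original tutorial):
## every sample in the vcf should be assigned to exactly one population,
## otherwise its color in the plot will be wrong.
assigned = sorted(s for pop in pops_dict.values() for s in pop)
assert assigned == sorted(pca.samples_vcforder), "unassigned or misspelled samples"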
pca = ipa.pca(vcffile, pops_dict)
pca.plot()
## pca.pcs is a property of the pca object that is populated after the plot() function is called. It contains
## the first 10 PCs for each sample. We construct a 'mask' based on the value of PC2, which here is the '1' in
## the first line of code (numpy arrays are 0-indexed and it's typical for PCs to be 1-indexed)
mask = pca.pcs.values[:, 1] > 500
print(mask)
## You can see here that the mask is a list of booleans that is the same length as the number of samples.
## We can use this list to print out the names of just the samples of interest
print(pca.samples_vcforder[mask])
## We can then use this list of "bad" samples in a call to pca.remove_samples
## and then replot the new pca
pca.remove_samples(pca.samples_vcforder[mask])
## Lets prove that they're gone now
print(pca.samples_vcforder)
## and do the plot
pca.plot()
pca.pcs
## Lets reload the full dataset so we have all the samples
pca = ipa.pca(vcffile, pops_dict)
pca.plot(pcs=[3,4])
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12, 5))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
pca.plot(ax=ax1, pcs=[1, 2])
pca.plot(ax=ax2, pcs=[3, 4])
fig = plt.figure(figsize=(12, 5))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
pca.plot(ax=ax1, pcs=[1, 2], legend=False)
pca.plot(ax=ax2, pcs=[3, 4])
## Here's the simple way, just pass in a matplotlib cmap, or even better, the name of a cmap
pca.plot(cmap="jet")
## Here's the harder way that gives you uber control. Pass in a dictionary mapping populations to colors.
my_colors = {
"rex":"aliceblue",
"thamno":"crimson",
"przewalskii":"deeppink",
"cyathophylloides":"fuchsia",
"cyathophylla":"goldenrod",
"superba":"black"
}
pca.plot(cdict=my_colors)
pca.get_missing_per_sample()
pca.missingness()
pca.trim_missing(1)
pca.missingness()
pca.get_missing_per_sample()
## Voila. Back to the full dataset.
pca = ipa.pca(data)
pca.missingness()
pca.fill_missing()
pca.missingness()
pca.get_missing_per_sample()
{x:len(y) for x, y in pca.pops.items()}
prettier_labels = {
"32082_przewalskii":"przewalskii",
"33588_przewalskii":"przewalskii",
"41478_cyathophylloides":"cyathophylloides",
"41954_cyathophylloides":"cyathophylloides",
"29154_superba":"superba",
"30686_cyathophylla":"cyathophylla",
"33413_thamno":"thamno",
"30556_thamno":"thamno",
"35236_rex":"rex",
"40578_rex":"rex",
"35855_rex":"rex",
"39618_rex":"rex",
"38362_rex":"rex"
}
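## Sketch (added): one way to apply the prettier labels is to rebuild the
## pops_dict keyed by them, assuming vcf sample names look like
## "<id>_<species>_<SRR accession>" as printed earlier.
pretty_pops = {}
for sample in pca.samples_vcforder:
    pop = prettier_labels["_".join(sample.split("_")[:2])]
    pretty_pops.setdefault(pop, []).append(sample)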
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id='sec2.1'></a>
Step2: Extract POI category and visiting frequency.
Step3: <a id='sec2.2'></a>
Step4: <a id='sec2.3'></a>
Step5: Sequences with length {3, 4, 5}
Step6: <a id='sec3'></a>
Step7: <a id='sec3.3'></a>
Step8: It can be seen from the above plot that discretization based on equal frequency (quantiles) performs better than discretization based on equal width; to balance complexity and accuracy, we choose "quantile, nbins=9".
Step9: Quantile based bins (equal frequency)
Step10: Equal width bins
Step11: Another Equal frequency bins
Step12: 3.3.2 Compute Transition Probabilities
Step14: <a id='sec3.4'></a>
Step15: 3.4.2 Discretize POI Pair Distance
Step16: Remove rows that contain NaN and plot the curve; we choose quantile, nbins=10 to balance complexity and accuracy.
Step17: Quantile based bins (equal frequency)
Step18: Equal width bins
Step19: Another Equal frequency bins
Step20: 3.4.3 Compute Transition Probabilities
Step21: <a id='sec4'></a>
Step22: Simple check.
Step23: <a id='sec4.1'></a>
Step24: <a id='sec4.2'></a>
Step25: Enumerate trajectories of the same (start, end) and length (3, 4 or 5) with respect to an actual sequence.
Step26: Compute the log likelihood of enumerated trajectories.
Step27: <a id='sec4.3'></a>
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
import re
import math
import random
import pickle
import pandas as pd
import numpy as np
import scipy.stats
#from numba import jit
from datetime import datetime
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
nfeatures = 8 # number of features
EPS = 1e-12 # smooth, deal with 0 probability
random.seed(123456789) # control random choice when splitting training/testing set
data_dir = 'data/data-ijcai15'
#fvisit = os.path.join(data_dir, 'userVisits-Osak.csv')
#fcoord = os.path.join(data_dir, 'photoCoords-Osak.csv')
#fvisit = os.path.join(data_dir, 'userVisits-Glas.csv')
#fcoord = os.path.join(data_dir, 'photoCoords-Glas.csv')
#fvisit = os.path.join(data_dir, 'userVisits-Edin.csv')
#fcoord = os.path.join(data_dir, 'photoCoords-Edin.csv')
fvisit = os.path.join(data_dir, 'userVisits-Toro.csv')
fcoord = os.path.join(data_dir, 'photoCoords-Toro.csv')
suffix = fvisit.split('-')[-1].split('.')[0]
visits = pd.read_csv(fvisit, sep=';')
coords = pd.read_csv(fcoord, sep=';')
# merge data frames according to column 'photoID'
assert(visits.shape[0] == coords.shape[0])
traj = pd.merge(visits, coords, on='photoID')
traj.head()
num_photo = traj['photoID'].unique().shape[0]
num_user = traj['userID'].unique().shape[0]
num_poi = traj['poiID'].unique().shape[0]
num_seq = traj['seqID'].unique().shape[0]
pd.DataFrame({'#photo': num_photo, '#user': num_user, '#poi': num_poi, '#seq': num_seq, \
'#photo/user': num_photo/num_user, '#seq/user': num_seq/num_user}, index=[str(suffix)])
#plt.figure(figsize=[15, 5])
#plt.xlabel('Longitude')
#plt.ylabel('Latitude')
#plt.scatter(traj['photoLon'], traj['photoLat'], marker='+')
poi_coords = traj[['poiID', 'photoLon', 'photoLat']].groupby('poiID').mean()
poi_coords.reset_index(inplace=True)
poi_coords.rename(columns={'photoLon':'poiLon', 'photoLat':'poiLat'}, inplace=True)
poi_catfreq = traj[['poiID', 'poiTheme', 'poiFreq']].groupby('poiID').first()
poi_catfreq.reset_index(inplace=True)
poi_all = pd.merge(poi_catfreq, poi_coords, on='poiID')
poi_all.set_index('poiID', inplace=True)
#poi_all.to_csv(fpoi, index=True)
seq_all = traj[['userID', 'seqID', 'poiID', 'dateTaken']].copy().groupby(['userID', 'seqID', 'poiID'])\
.agg([np.min, np.max, np.size])
seq_all.columns = seq_all.columns.droplevel()
seq_all.reset_index(inplace=True)
seq_all.rename(columns={'amin':'arrivalTime', 'amax':'departureTime', 'size':'#photo'}, inplace=True)
seq_all['poiDuration(sec)'] = seq_all['departureTime'] - seq_all['arrivalTime']
#seq_all.head()
seq_user = seq_all[['userID', 'seqID', 'poiID']].copy().groupby(['userID', 'seqID']).agg(np.size)
seq_user.reset_index(inplace=True)
seq_user.rename(columns={'size':'seqLen'}, inplace=True)
seq_user.set_index('seqID', inplace=True)
#seq_user.head()
seq_len = seq_all[['seqID', 'poiID']].copy().groupby('seqID').agg(np.size)
seq_len.reset_index(inplace=True)
seq_len.rename(columns={'poiID':'seqLen'}, inplace=True)
#seq_len.head()
seq_stats = seq_all[['seqID', '#photo', 'poiDuration(sec)']].copy().groupby('seqID').agg(np.sum)
seq_stats.reset_index(inplace=True)
#seq_stats.rename(columns={'poiDuration(sec)':'totalPoiDuration(sec)'}, inplace=True)
seq_stats = pd.merge(seq_len, seq_stats, on='seqID')
seq_stats['poiDuration(sec)'] /= 60
seq_stats.rename(columns={'poiDuration(sec)':'totalPoiDuration(min)'}, inplace=True)
seq_stats.set_index('seqID', inplace=True)
#seq_stats.head()
#ax = seq_stats['seqLen'].hist(bins=50)
#ax.set_xlabel('sequence length')
#ax.set_ylim([0.1, 1e4])
#ax.set_yscale('log')
#ax = seq_stats['#photo'].hist(bins=50)
#ax.set_xlabel('#photo for sequence')
#ax.set_ylim([0.1, 1e4])
#ax.set_yscale('log')
#ax = seq_stats['totalPoiDuration(min)'].hist(bins=100)
#ax.set_xlabel('totalPoiDuration(min)')
#ax.set_ylim([0.1, 1e4])
#ax.set_yscale('log')
#ax.set_xscale('log')
#seq_stats = seq_stats[seq_stats['seqLen'].isin({3, 4, 5})]
#ax = seq_stats['totalPoiDuration(min)'].hist(bins=50)
#ax.set_xlabel('totalPoiDuration(min)')
#ax.set_ylim([0.1, 1e4])
#ax.set_yscale('log')
def extract_seq(seqid, seq_all):
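    # Return the list of POIs in sequence `seqid`, ordered by arrival time.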
seqi = seq_all[seq_all['seqID'] == seqid].copy()
    seqi.sort_values(by='arrivalTime', ascending=True, inplace=True)
return seqi['poiID'].tolist()
def calc_poi_cat_transmat(seqid_set, poi_all, seq_all):
poi_cats = poi_all['poiTheme'].unique().tolist()
poi_cats.sort()
    poi_cat_transmat = pd.DataFrame(data=np.zeros((len(poi_cats), len(poi_cats)), dtype=float), \
index=poi_cats, columns=poi_cats)
for seqid in seqid_set:
seq = extract_seq(seqid, seq_all)
for j in range(len(seq)-1):
poi1 = seq[j]
poi2 = seq[j+1]
cat1 = poi_all.loc[poi1, 'poiTheme']
cat2 = poi_all.loc[poi2, 'poiTheme']
poi_cat_transmat.loc[cat1, cat2] += 1
return poi_cat_transmat
def normalise_transmat(transmat):
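    # Turn a matrix of transition counts into row-stochastic probabilities,
    # adding a small row-specific constant to smooth away zero counts.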
assert(isinstance(transmat, pd.DataFrame))
for row in range(transmat.index.shape[0]):
nonzeroidx = np.nonzero(transmat.iloc[row])[0].tolist()
if len(nonzeroidx) < transmat.columns.shape[0]:
minv = np.min(transmat.iloc[row, nonzeroidx])
EPS = 0.1 * minv # row-specific smooth factor
#zeroidx = list(set(range(len(transmat.columns))) - set(nonzeroidx))
#transmat.iloc[row, zeroidx] = EPS
transmat.iloc[row] += EPS
rowsum = np.sum(transmat.iloc[row])
assert(rowsum > 0)
transmat.iloc[row] /= rowsum
return transmat
poi_cat_transmat = calc_poi_cat_transmat(seq_all['seqID'].unique(), poi_all, seq_all)
poi_cat_transmat
poi_cat_transmat = normalise_transmat(poi_cat_transmat)
poi_cat_transmat
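# Added sanity check: each row of the normalised transition matrix is a
# probability distribution, so the rows must sum to 1.
assert np.allclose(poi_cat_transmat.sum(axis=1), 1.0)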
poi_cat_transmat_log = np.log10(poi_cat_transmat)
poi_cat_transmat_log
rank_mean_Toro = [18.496, 16.049, 16.478, 16.811, 15.049, 15.831, 15.567, 14.556, 14.398, 14.874, 13.491, 13.528, \
13.279, 12.784, 24.761, 21.841, 23.636, 20.154, 19.129, 16.922, 18.240, 17.507, 18.196, 17.711, \
17.389, 15.820, 15.681, 15.712, 15.977]
rank_std_Toro = [24.746, 19.873, 21.002, 22.159, 18.722, 20.140, 19.811, 17.937, 17.694, 18.915, 16.760, 16.964, \
15.960, 15.796, 29.356, 28.366, 29.252, 26.713, 25.530, 20.619, 22.739, 22.659, 23.321, 23.398, \
22.718, 20.856, 19.559, 19.794, 20.373]
rank_mean_Glas = [16.328, 16.188, 15.130, 14.316, 14.581, 14.777, 15.019, 14.255, 13.771, 13.568, 11.464, 12.416, \
12.596, 12.209, 23.705, 23.225, 19.416, 19.201, 19.907, 17.030, 19.977, 18.183, 18.158, 15.555, \
15.174, 15.184, 12.922, 14.274, 20.427]
rank_std_Glas = [19.763, 18.117, 18.643, 17.294, 17.522, 17.175, 17.101, 16.184, 16.043, 15.522, 13.896, 13.081, \
14.259, 13.527, 24.304, 25.065, 22.059, 23.250, 23.132, 19.898, 23.118, 22.388, 21.773, 19.722, \
17.188, 18.837, 15.119, 16.828, 21.596]
rank_top5_Toro = [49.25, 49.25, 49.62, 47.74, 48.12, 48.87, 50.75, 51.88, 50.75, 51.88, 54.51, 54.14, \
53.38, 56.77, 43.23, 47.74, 45.11, 46.99, 46.99, 47.37, 45.49, 48.50, 46.99, 49.25, \
49.62, 53.01, 51.13, 51.13, 49.62]
rank_top5_Glas = [55, 52, 58, 61, 58, 59, 56, 64, 62, 63, 66, 63, 65, 66, 47, 47, 53, 54, 53, 57, 54, 55, 52, 60, 60, \
61, 63, 61, 50]
#xlabels = ['qbins='+str(x) for x in range(2, 16)]
#xlabels.extend(['ebins='+str(x) for x in range(2, 16)])
#xlabels.append('another')
xlabels = [x for x in range(2, len(rank_mean_Toro)+2)]
plt.figure(figsize=[15, 10])
plt.xlim([0, len(rank_mean_Toro)+2])
plt.ylim([-20, 100])
plt.errorbar(xlabels, rank_mean_Toro, rank_std_Toro, linestyle='--', marker='s', label='errorbar_Toronto')
plt.errorbar(xlabels, rank_mean_Glas, rank_std_Glas, linestyle='--', marker='s', label='errorbar_Glasgow')
plt.plot(xlabels, rank_top5_Toro, linestyle='--', marker='s', label='top5_Toronto')
plt.plot(xlabels, rank_top5_Glas, linestyle='--', marker='s', label='top5_Glasgow')
plt.legend()
#idx = 10
idx = 7
plt.annotate('choose', xy=(xlabels[idx], rank_top5_Glas[idx]), xytext=(xlabels[idx], rank_top5_Glas[idx]+15), \
arrowprops=dict(facecolor='green', shrink=0.1))
poi_all['poiFreq'].values
poi_all['poiFreq'].describe()
poi_all['poiFreq'].quantile([.25, .5, .75]).tolist()
ax = poi_all['poiFreq'].hist(bins=10)
ax.set_xlabel('POI Popularity')
ax.set_ylabel('#POI')
#plt.plot(np.ones(poi_all.index.shape[0]), np.sqrt(poi_all['poiFreq']), marker='+')
nbins = 9
quantiles = np.round(np.linspace(0, 1, nbins+1), 2)[1:-1]
quantiles
bins_qt = [0]
bins_qt.extend(poi_all['poiFreq'].quantile(quantiles))
bins_qt.append(poi_all['poiFreq'].max() + 1)
bins_qt
#nbins = 15
#inter = round((poi_all['poiFreq'].max() + 1) / nbins)
#bins_ew = [x*inter for x in range(nbins)]
#bins_ew.append(poi_all['poiFreq'].max() + 1)
#bins_ew
#bins = np.linspace(0, 10000, 11)
#bins = np.logspace(0, 4, 5)
#bins = [1, 100, 500, 1000, 2000, 5000]
#bins_ef = [0, 500, 1500, 10000]
bins_pop = bins_qt
#bins_pop = bins_ew
#bins_pop = bins_ef
ax = poi_all['poiFreq'].hist(bins=bins_pop)
ax.set_xlabel('POI Popularity')
ax.set_ylabel('#POI')
ax.set_xscale('log')
poi_all['popClass'] = np.digitize(poi_all['poiFreq'].values, bins_pop)
#poi_all
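# Illustration of np.digitize with hypothetical edges: the value 5 against
# bins [0, 3, 7, 10] falls in the half-open interval [3, 7), so it gets class 2.
print(np.digitize([5], [0, 3, 7, 10]))  # -> [2]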
def calc_poi_pop_transmat(seqid_set, poi_all, seq_all):
pop_class = poi_all['popClass'].unique().tolist()
pop_class.sort()
    poi_pop_transmat = pd.DataFrame(data=np.zeros((len(pop_class), len(pop_class)), dtype=float), \
index=pop_class, columns=pop_class)
for seqid in seqid_set:
seq = extract_seq(seqid, seq_all)
for j in range(len(seq)-1):
poi1 = seq[j]
poi2 = seq[j+1]
pc1 = poi_all.loc[poi1, 'popClass']
pc2 = poi_all.loc[poi2, 'popClass']
poi_pop_transmat.loc[pc1, pc2] += 1
return poi_pop_transmat
poi_pop_transmat = calc_poi_pop_transmat(seq_all['seqID'].unique(), poi_all, seq_all)
poi_pop_transmat
poi_pop_transmat = normalise_transmat(poi_pop_transmat)
poi_pop_transmat
poi_pop_transmat_log = np.log10(poi_pop_transmat)
poi_pop_transmat_log
def calc_dist(longitude1, latitude1, longitude2, latitude2):
    """Calculate the distance (unit: km) between two places on earth"""
# convert degrees to radians
lon1 = math.radians(longitude1)
lat1 = math.radians(latitude1)
lon2 = math.radians(longitude2)
lat2 = math.radians(latitude2)
radius = 6371.009 # mean earth radius is 6371.009km, en.wikipedia.org/wiki/Earth_radius#Mean_radius
# The haversine formula, en.wikipedia.org/wiki/Great-circle_distance
dlon = math.fabs(lon1 - lon2)
dlat = math.fabs(lat1 - lat2)
return 2 * radius * math.asin( math.sqrt( \
(math.sin(0.5*dlat))**2 + math.cos(lat1) * math.cos(lat2) * (math.sin(0.5*dlon))**2 ))
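# Added sanity check: one degree of longitude along the equator equals
# R * pi/180 (roughly 111.2 km with the mean radius used above).
assert abs(calc_dist(0, 0, 1, 0) - 6371.009 * math.pi / 180) < 1e-6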
def calc_obs_poipair_distmat(seqid_set, poi_all, seq_all):
    poi_distmat = pd.DataFrame(data=np.full((poi_all.shape[0], poi_all.shape[0]), np.nan, dtype=float), \
index=poi_all.index, columns=poi_all.index)
for seqid in seqid_set:
seq = extract_seq(seqid, seq_all)
if len(seq) < 2: continue
for j in range(len(seq)-1):
poi1 = seq[j]
poi2 = seq[j+1]
if np.isnan(poi_distmat.loc[poi1, poi2]):
dist = calc_dist(poi_all.loc[poi1, 'poiLon'], poi_all.loc[poi1, 'poiLat'], \
poi_all.loc[poi2, 'poiLon'], poi_all.loc[poi2, 'poiLat'])
poi_distmat.loc[poi1, poi2] = dist
poi_distmat.loc[poi2, poi1] = dist
return poi_distmat
poi_distmat = calc_obs_poipair_distmat(seq_all['seqID'].unique(), poi_all, seq_all)
#poi_distmat
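# Added sanity check: distances were written in both directions above, so the
# observed-distance matrix should be symmetric (NaNs compared as zeros here).
assert np.allclose(poi_distmat.fillna(0).values, poi_distmat.fillna(0).values.T)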
rank_mean_Toro = [15.156, 14.917, 14.389, 13.645, 14.299, 12.689, 12.996, 12.510, 12.467, 12.548, 11.980, \
12.170, 11.384, 11.444, 10.932, 10.991, 10.836, 15.110]
rank_std_Toro = [16.911, 17.484, 17.527, 16.550, 17.550, 14.674, 15.606, 14.946, 14.549, 14.758, 13.883, \
13.983, 12.787, 12.888, 12.621, 12.950, 12.383, 16.204]
rank_mean_Glas = [14.354, 14.450, 14.255, 14.085, 13.156, 12.755, 11.716, 11.355, 11.181, 10.214, 10.041, \
9.345, 9.008, 8.613, 8.553, 8.025, 7.922, 14.937]
rank_std_Glas = [16.541, 16.173, 16.184, 16.159, 14.436, 13.681, 13.003, 12.893, 12.107, 10.872, 10.895, \
9.759, 9.444, 9.052, 9.336, 8.502, 8.702, 16.630]
rank_top5_Toro = [45.11, 48.87, 52.26, 55.64, 51.88, 52.63, 53.76, 52.63, 54.89, 53.76, 55.26, 52.63, 57.52, \
56.77, 58.65, 58.65, 57.14, 43.98]
rank_top5_Glas = [60, 60, 64, 63, 63, 62, 64, 66, 68, 68, 73, 73, 73, 75, 74, 79, 79, 60]
xlabels = [x for x in range(2, len(rank_mean_Toro)+2)]
plt.figure(figsize=[15, 10])
plt.xlim([0, len(rank_mean_Toro)+2])
plt.ylim([-20, 100])
plt.errorbar(xlabels, rank_mean_Toro, rank_std_Toro, linestyle='--', marker='s', label='errorbar_Toronto')
plt.errorbar(xlabels, rank_mean_Glas, rank_std_Glas, linestyle='--', marker='s', label='errorbar_Glasgow')
plt.plot(xlabels, rank_top5_Toro, linestyle='--', marker='s', label='top5_Toronto')
plt.plot(xlabels, rank_top5_Glas, linestyle='--', marker='s', label='top5_Glasgow')
plt.legend()
#idx = 10
idx = 8
plt.annotate('choose', xy=(xlabels[idx], rank_top5_Glas[idx]), xytext=(xlabels[idx], rank_top5_Glas[idx]+15), \
arrowprops=dict(facecolor='green', shrink=0.1))
#distdata = pd.Series([x for x in np.unique(poi_distmat.get_values().flatten()) if not np.isnan(x)])
distdata = pd.Series([poi_distmat.iloc[x, y] \
for x in range(poi_distmat.index.shape[0]) \
for y in range(x+1, poi_distmat.index.shape[0]) \
if not np.isnan(poi_distmat.iloc[x, y])])
distdata.describe()
ax = distdata.hist(bins=20)
ax.set_xlabel('POI-Pair Distance (km)')
ax.set_ylabel('#POI-Pair')
nbins = 10
quantiles = np.round(np.linspace(0, 1, nbins+1), 2)[1:-1]
quantiles
bins_qt = [0]
bins_qt.extend(distdata.quantile(quantiles))
bins_qt.append(10*round(distdata.max()))
bins_qt
#nbins = 4
#inter = round((round(distdata.max()) + 1) / nbins)
#maxdist = 30 # Toronto, maximum distance among all POI pairs
#maxdist = 46 # Glasgow
#inter = round(maxdist / nbins)
#bins_ew = [x*inter for x in range(nbins)]
#bins_ew.append(maxdist)
#bins_ew
#bins = np.linspace(0, 10, 7)
#bins = np.logspace(0, 2, 4)
#bins = [0, 1, 2, 3, 10]
#bins_a = [0, 2, 5, 100] # walk, ride, drive
#bins_ef = [0, 1.15, 2.25, 100]
bins_dist = bins_qt
#bins_dist = bins_ew
#bins_dist = bins_ef
#bins_dist = bins_a
ax = distdata.iloc[np.nonzero(distdata.values)[0]].hist(bins=bins_dist)
ax.set_xlabel('POI-Pair Distance (km)')
ax.set_ylabel('#POI-Pair')
ax.set_xscale('log')
poi_distclass_mat = pd.DataFrame(data=np.zeros((poi_all.shape[0], poi_all.shape[0]), dtype=np.int), \
index=poi_all.index, columns=poi_all.index)
for i in range(poi_all.index.shape[0]):
poi1 = poi_all.index[i]
for j in range(i+1, poi_all.index.shape[0]):
poi2 = poi_all.index[j]
dc = None
if np.isnan(poi_distmat.loc[poi1, poi2]):
dist = calc_dist(poi_all.loc[poi1, 'poiLon'], poi_all.loc[poi1, 'poiLat'], \
poi_all.loc[poi2, 'poiLon'], poi_all.loc[poi2, 'poiLat'])
dc = np.digitize([dist], bins_dist)[0]
else:
dc = np.digitize([poi_distmat.loc[poi1, poi2]], bins_dist)[0]
assert(dc is not None)
poi_distclass_mat.loc[poi1, poi2] = dc
poi_distclass_mat.loc[poi2, poi1] = dc
poi_distclass_mat
def calc_poipair_dist_transmat(seqid_set, poi_all, seq_all, poi_distclass_mat, bins_dist):
dist_class = list(range(1, len(bins_dist)))
poipair_dist_transmat = pd.DataFrame(data=np.zeros((len(dist_class), len(dist_class)), dtype=np.float), \
index=dist_class, columns=dist_class)
for seqid in seqid_set:
seq = extract_seq(seqid, seq_all)
if len(seq) < 3: continue
for j in range(1, len(seq)-1):
poi1 = seq[j-1]
poi2 = seq[j]
poi3 = seq[j+1]
dc1 = poi_distclass_mat.loc[poi1, poi2]
dc2 = poi_distclass_mat.loc[poi2, poi3]
poipair_dist_transmat.loc[dc1, dc2] += 1
return poipair_dist_transmat
poipair_dist_transmat = calc_poipair_dist_transmat(seq_all['seqID'].unique(), poi_all, seq_all, \
poi_distclass_mat, bins_dist)
poipair_dist_transmat
poipair_dist_transmat = normalise_transmat(poipair_dist_transmat)
poipair_dist_transmat
poipair_dist_transmat_log = np.log10(poipair_dist_transmat)
poipair_dist_transmat_log
def calc_seq_loglikelihood(seq, poi_all, poi_cat_transmat_log, poi_pop_transmat_log, \
poi_distclass_mat, poipair_dist_transmat_log):
assert(len(seq) > 1)
cat1 = poi_all.loc[seq[0], 'poiTheme']
cat2 = poi_all.loc[seq[1], 'poiTheme']
pc1 = poi_all.loc[seq[0], 'popClass']
pc2 = poi_all.loc[seq[1], 'popClass']
logL = poi_cat_transmat_log.loc[cat1, cat2] + poi_pop_transmat_log.loc[pc1, pc2]
for j in range(1, len(seq)-1):
poi1 = seq[j-1]
poi2 = seq[j]
poi3 = seq[j+1]
cat2 = poi_all.loc[poi2, 'poiTheme']
cat3 = poi_all.loc[poi3, 'poiTheme']
pc2 = poi_all.loc[poi2, 'popClass']
pc3 = poi_all.loc[poi3, 'popClass']
dc12 = poi_distclass_mat.loc[poi1, poi2]
dc23 = poi_distclass_mat.loc[poi2, poi3]
logL += poi_cat_transmat_log.loc[cat2, cat3] + poi_pop_transmat_log.loc[pc2, pc3]
#print(seq, dc12, dc23)
logL += poipair_dist_transmat_log.loc[dc12, dc23]
return logL
seq1 = [10, 21, 28, 22]
d12 = calc_dist(poi_all.loc[10,'poiLon'], poi_all.loc[10,'poiLat'], poi_all.loc[21,'poiLon'], poi_all.loc[21, 'poiLat'])
d23 = calc_dist(poi_all.loc[21,'poiLon'], poi_all.loc[21,'poiLat'], poi_all.loc[28,'poiLon'], poi_all.loc[28, 'poiLat'])
d34 = calc_dist(poi_all.loc[28,'poiLon'], poi_all.loc[28,'poiLat'], poi_all.loc[22,'poiLon'], poi_all.loc[22, 'poiLat'])
print(d12, d23, d34)
print(bins_dist)
s1 = poi_cat_transmat_log.loc[poi_all.loc[10, 'poiTheme'], poi_all.loc[21, 'poiTheme']] + \
poi_cat_transmat_log.loc[poi_all.loc[21, 'poiTheme'], poi_all.loc[28, 'poiTheme']] + \
poi_cat_transmat_log.loc[poi_all.loc[28, 'poiTheme'], poi_all.loc[22, 'poiTheme']] + \
poi_pop_transmat_log.loc[poi_all.loc[10, 'popClass'], poi_all.loc[21, 'popClass']] + \
poi_pop_transmat_log.loc[poi_all.loc[21, 'popClass'], poi_all.loc[28, 'popClass']] + \
poi_pop_transmat_log.loc[poi_all.loc[28, 'popClass'], poi_all.loc[22, 'popClass']]
s2 = poipair_dist_transmat_log.loc[np.digitize([d12], bins_dist)[0], np.digitize([d23], bins_dist)[0]] + \
poipair_dist_transmat_log.loc[np.digitize([d23], bins_dist)[0], np.digitize([d34], bins_dist)[0]]
print(s1+s2)
calc_seq_loglikelihood([10, 21, 28, 22], poi_all, poi_cat_transmat_log, poi_pop_transmat_log, \
poi_distclass_mat, poipair_dist_transmat_log)
def parse_seqstr(seqstr):
term = re.sub('[ \[\]]', '', seqstr).split(',')
return [int(x) for x in term]
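# Example of the expected parsing (hypothetical sequence string):
# parse_seqstr('[10, 21, 28]')  ->  [10, 21, 28]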
unique_seq = dict() # seq -> [(seqid, userid)]
for seqid in sorted(seq_all['seqID'].unique().tolist()):
seq = extract_seq(seqid, seq_all)
if str(seq) not in unique_seq:
unique_seq[str(seq)] = [(seqid, seq_user.loc[seqid])]
else:
unique_seq[str(seq)].append((seqid, seq_user.loc[seqid]))
unique_seq345 = [parse_seqstr(x) for x in sorted(unique_seq.keys()) if len(x.split(',')) in {3,4,5}]
unique_seq345_logL = pd.DataFrame(data=np.zeros((len(unique_seq345), 2), dtype=np.float), \
index=[str(x) for x in unique_seq345], columns=['logLikelihood', 'seqLen'])
unique_seq345_logL.index.name = 'actSeq'
for seq in unique_seq345:
assert(len(seq) in {3,4,5})
logL = calc_seq_loglikelihood(seq, poi_all, poi_cat_transmat_log, poi_pop_transmat_log, \
poi_distclass_mat, poipair_dist_transmat_log)
unique_seq345_logL.loc[str(seq), 'logLikelihood'] = logL
unique_seq345_logL.loc[str(seq), 'seqLen'] = len(seq)
#print('Sequence %-20s Log likelihood: %.3f' % (str(seq), logL))
print(unique_seq345_logL.index.shape[0])
unique_seq345_logL.head()
unique_seq345_logL['seqLen'].hist(bins=10)
poi_list = poi_all.index.tolist()
#poi_list
def enum_seq345(seq, poi_list):
assert(len(seq) in {3, 4, 5})
p0 = seq[0]
pN = seq[-1]
# enumerate sequences with length 3
if len(seq) == 3:
return [[p0, p, pN] \
for p in poi_list if p not in {p0, pN}]
# enumerate sequences with length 4
if len(seq) == 4:
return [[p0, p1, p2, pN] \
for p1 in poi_list if p1 not in {p0, pN} \
for p2 in poi_list if p2 not in {p0, p1, pN}]
# enumerate sequences with length 5
if len(seq) == 5:
return [[p0, p1, p2, p3, pN] \
for p1 in poi_list if p1 not in {p0, pN} \
for p2 in poi_list if p2 not in {p0, p1, pN} \
for p3 in poi_list if p3 not in {p0, p1, p2, pN}]
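# Size check (assuming the two endpoints are distinct): with m candidate POIs the
# enumeration yields (m-2) sequences of length 3, (m-2)*(m-3) of length 4 and
# (m-2)*(m-3)*(m-4) of length 5, so the candidate set grows quickly with length.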
enum_logL_df = pd.DataFrame()
for seq in unique_seq345:
enum_seqs = enum_seq345(seq, poi_list)
df = pd.DataFrame(data=sorted([str(x) for x in enum_seqs]), columns=['enumSeq'])
df.set_index('enumSeq', inplace=True)
df['actSeq'] = str(seq)
enum_logL_df = enum_logL_df.append(df)
print(enum_logL_df.shape)
enum_logL_df.head()
t1 = datetime.now()
logL = Parallel(n_jobs=-2)(delayed(calc_seq_loglikelihood)\
(seq, poi_all, poi_cat_transmat_log, poi_pop_transmat_log, poi_distclass_mat, poipair_dist_transmat_log)\
for seq in [parse_seqstr(x) for x in enum_logL_df.index])
print('%d seconds used' % (datetime.now()-t1).total_seconds()) # 930 seconds
enum_logL_df['enumSeqLogLikelihood'] = logL
#enum_logL_df.head(23)
df = pd.DataFrame(data=sorted([str(x) for x in unique_seq345]), columns=['actSeq'])
df.set_index('actSeq', inplace=True)
df['actSeqLogLikelihood'] = unique_seq345_logL.loc[df.index, 'logLikelihood']
df['enumSeq'] = ''
df['enumSeqLogLikelihood'] = 0
df['actSeqRank'] = 0
df['#enumSeq'] = 0
for seqstr in df.index:
sub_df = enum_logL_df[enum_logL_df['actSeq'] == seqstr].copy()
sub_df.reset_index(inplace=True)
    sub_df.sort_values(by=['enumSeqLogLikelihood'], ascending=False, inplace=True)
df.loc[seqstr, 'enumSeq'] = sub_df.iloc[0]['enumSeq']
df.loc[seqstr, 'enumSeqLogLikelihood'] = sub_df.iloc[0]['enumSeqLogLikelihood']
df.loc[seqstr, 'actSeqRank'] = 1 + np.nonzero(sub_df['enumSeq'] == seqstr)[0][0] # rank of actual sequence
df.loc[seqstr, '#enumSeq'] = sub_df.index.shape[0]
df['actSeqRank(Top%)'] = 100*df['actSeqRank']/df['#enumSeq']
#df
print('mean: %.3f, std: %.3f' % (round(df['actSeqRank(Top%)'].mean(),3), round(df['actSeqRank(Top%)'].std(),3)))
df['actSeqRank(Top%)'].describe()
ntop = np.nonzero(df['actSeqRank'] <= 5)[0].shape[0]
print('%d/%d = %.2f%%' % (ntop, df.index.shape[0], 100*ntop/df.index.shape[0]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us implement a Hopfield network using images from the MNIST dataset as patterns.
Step2: We now fill an array with the patterns. We only need a few samples, so we take them from the training set.
Step3: Let us visualize our two patterns
Step4: Learning the weights
Step5: Recall
Step6: Here you can see two animations showing the network that is initially activated with one of the two patterns. The initial activation is corrupted with a lot of noise so that the bottom half of the figure is completely obscured.
Step7: To plot the two animations we need a function to initialize a figure with three plots
Step8: We also need another function that updates the figure at each frame of the animation.
Step9: Finally we use the FuncAnimation class. We first build a data list where each row is a tuple containing the plot handlers and the data for the plot updates.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from pylab import *
#### Download the dataset
# Get the script from internet
! wget https://raw.githubusercontent.com/sorki/python-mnist/master/get_data.sh > /dev/null 2>&1
# Run it to dovnload all files in a local dir named 'data'
! bash get_data.sh >/dev/null 2>&1
# We do not need the script anymore, remove it
! rm get_data.sh* > /dev/null 2>&1
# Initialize the dataset variables
%run utils
# Take two rows
patterns = array(mndata.train_images)[[2,5],]
labels = array(mndata.train_labels)[[2,5],]
# We need only the sign (transform to binary input)
patterns = sign(patterns/255.0 - 0.5)
# Set the number of patterns (two in out case)
n_patterns = patterns.shape[0]
# Number of units of the network
n = img_side*img_side
fig = figure(figsize = (8, 4))
for i in xrange(n_patterns):
plot_img( to_mat(patterns[i]),
fig, i+1, windows = 2 )
# Initialize weights to zero values
W = zeros([n,n])
# Accumulate outer products
for pattern in patterns :
W += outer(pattern, pattern)
# Divide times the number of patterns
W /= float(n_patterns)
# Exclude the autoconnections
W *= 1.0 - eye(n, n)
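# Intuition for the Hebbian rule above (a short aside, not from the original
# notebook): for a single stored pattern p, W is proportional to outer(p, p)
# with the diagonal removed, so dot(W, p) ~ (n-1) * p and sign(dot(W, p))
# recovers p exactly.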
# Number of timesteps
stime = 1000
# Number of samples to store as long
# as spreading goes on
samples = 100
# store data at each sampling interval
sample_interval = stime/samples
# Init the stories of spreading as a zero array,
# we will fill it in at each timestep and we will
# plot it at the end
store_images = zeros([n_patterns, n, samples])
# Init the stories of energy as a zero array,
# we will fill it in at each timestep and we will
# plot it at the end
store_energy = zeros([n_patterns, samples])
# We simulate two iterations, each one starting
# with a coorupted version of one of our two patterns
for target_index in xrange(n_patterns) :
# Copy the original pattern
target = patterns[target_index]
x = target.copy()
# Then modify the second half of the image
# putting random binary values
x[(n/2):] = sign(randn(n/2))
# During the iterations we need to peek
# one unit at random. Thus we must prepare
# a random sequence of indices:
# we get the sequence of indices
# of the network units
x_indices = arange(n)
# and we shuffle it
shuffle(x_indices)
# the iterations
for t in xrange(stime) :
# Get the current index browsing
# the random sequence
current_x = x_indices[t%n]
# Activation of a unit
x[current_x] = sign(dot(W[current_x,:], x))
# Store current activations
        if t % sample_interval == 0 :
# Energy of the current state of the network
store_energy[target_index, t/sample_interval] = -0.5*dot(x, dot(W, x))
# array containing samples of network activation
store_images[target_index,:,t/sample_interval] = x
# The matplotlib object to do animations
from matplotlib import animation
# This grid allows to layout subplots in a more
# flexible way
import matplotlib.gridspec as gridspec
def init_figure(fig) :
# Init the grid and the figure
gs = gridspec.GridSpec(6, 20)
#-------------------------------------------------
# Plot 1 - plot the target digit
# Create subplot
ax1 = fig.add_subplot(gs[:4,:4])
title("target")
# Create the imshow and save the handler
im_target = ax1.imshow(to_mat(patterns[0]),
interpolation = 'none',
aspect = 'auto',
cmap = cm.binary)
axis('off')
#-------------------------------------------------
# Plot 2 - plot the current state of the network
# Create subplot
ax2 = fig.add_subplot(gs[:4,6:10])
title("recalling")
# Create the imshow and save the handler
im_activation = ax2.imshow(to_mat(store_images[0,:,0]),
interpolation = 'none',
aspect = 'auto',
cmap = cm.binary)
axis('off')
#-------------------------------------------------
# Plot 3 - plot the current history of energy
# Create subplot
ax3 = fig.add_subplot(gs[:4,12:])
title("Energy")
# Create the line plot and save the handler
im_energy, = ax3.plot(store_energy[0,])
# Only bottom-left axes - no tics
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.set_xticks([])
ax3.set_yticks([])
# return plot handlers
return im_target, im_activation, im_energy
# Updates images at each frame of the animation
# data : list of tuples Each row contains the
# arguments of update for
# a frame
# returns : tuple The handlers of the
# images
def update(data) :
# unpack plot handlers and data
im_A, im_B, im_C, A, B, C = data
# Update data of plot 1, plot 2 and 3
im_A.set_array(to_mat(A))
im_B.set_array(to_mat(B))
im_C.set_data(arange( len(C)), C)
# return plot handlers
return im_A, im_B, im_C
for target_index in xrange(n_patterns):
# Init the figure
fig = figure(figsize=(8, 3.5))
im_target, im_activation, im_energy = init_figure(fig)
# Build the sequence of update arguments.
# each row of the list contains:
# 1 the target plot handler
# 2 the activation plot handler
# 3 the energy plot handler
# 4 the target update data
# 5 the activation update data
# 6 the energy update data
data = [(
im_target,
im_activation,
im_energy,
patterns[target_index],
squeeze(store_images[target_index,:,t]),
store_energy[target_index, :t] )
for t in xrange(samples ) ]
# Create and render the animation
anim = animation.FuncAnimation(fig, func = update, frames = data )
# save it to file
anim.save("mnist-hopfield_{:d}.gif".format(labels[target_index]),
fps = 10, writer='imagemagick')
from IPython.core.display import HTML
def css_styling():
styles = open("../style/ipybn.css", "r").read()
return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def countDigit(n):
    prime = [False] * 10
    prime[2] = True
    prime[3] = True
    prime[5] = True
    prime[7] = True
    temp = n
    count = 0
    while temp != 0:
        d = temp % 10
        temp //= 10
        if d > 0 and n % d == 0 and prime[d]:
            count += 1
    return count

n = 1032
print(countDigit(n))
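# Expected output for n = 1032: 2, since the digits 2 and 3 are prime and
# divide 1032, the digit 0 is skipped, and 1 is not prime.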
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sparsity pattern
Step2: What we need to find out to see how it actually works
Step3: As you see, CSR is faster, and for more unstructured patterns the gain will be larger.
Step4: How to make things more efficient
Step5: Florida sparse matrix collection
Step6: Visualization of sparse matrices and graphs
Step7: And the factors...
Step8: It is interesting to note that splu without permc_spec produces a permutation which does not yield the bidiagonal factor
Step9: 2D-case
Step10: For the correct permutation in the 2D case, the number of nonzeros in the $L$ factor grows as $\mathcal{O}(N \log N)$, but the complexity is $\mathcal{O}(N^{3/2})$.
Step11: Fill-in
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy as sp
import scipy.sparse
from scipy.sparse import csc_matrix, csr_matrix
import matplotlib.pyplot as plt
import scipy.linalg
import scipy.sparse.linalg
%matplotlib inline
n = 5
ex = np.ones(n);
lp1 = sp.sparse.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csr');
e = sp.sparse.eye(n)
A = sp.sparse.kron(lp1, e) + sp.sparse.kron(e, lp1)
A = csc_matrix(A)
plt.spy(A, aspect='equal', marker='.', markersize=5)
n = 10
ex = np.ones(n);
lp1 = sp.sparse.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csr');
e = sp.sparse.eye(n)
A = sp.sparse.kron(lp1, e) + sp.sparse.kron(e, lp1)
A = csr_matrix(A)
rhs = np.ones(n * n)
sol = sp.sparse.linalg.spsolve(A, rhs)
_, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot(sol)
ax1.set_title('Not reshaped solution')
ax2.contourf(sol.reshape((n, n), order='f'))
ax2.set_title('Reshaped solution')
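# Optional sanity check (not in the original notebook): the direct sparse solve
# should satisfy the linear system up to machine precision.
print('Residual norm: {0:3.1e}'.format(np.linalg.norm(A.dot(sol) - rhs)))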
import numpy as np
import scipy as sp
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse import csc_matrix, csr_matrix, coo_matrix
import matplotlib.pyplot as plt
%matplotlib inline
n = 400
ex = np.ones(n);
lp1 = sp.sparse.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csr');
e = sp.sparse.eye(n)
A = sp.sparse.kron(lp1, e) + sp.sparse.kron(e, lp1)
A = csr_matrix(A)
rhs = np.ones(n * n)
B = coo_matrix(A)
%timeit A.dot(rhs)
%timeit B.dot(rhs)
import numpy as np
import time
n = 4000
a = np.random.randn(n, n)
v = np.random.randn(n)
t = time.time()
np.dot(a, v)
t = time.time() - t
print('Time: {0: 3.1e}, Efficiency: {1: 3.1e} Gflops'.\
format(t, ((2 * n ** 2)/t) / 10 ** 9))
n = 4000
ex = np.ones(n);
a = sp.sparse.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csr');
rhs = np.random.randn(n)
t = time.time()
a.dot(rhs)
t = time.time() - t
print('Time: {0: 3.1e}, Efficiency: {1: 3.1e} Gflops'.\
format(t, (3 * n) / t / 10 ** 9))
n = 1000000
k = 1
ex = np.ones(n);
a = sp.sparse.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csr');
rhs = np.random.randn(n, k)
t = time.time()
a.dot(rhs)
t = time.time() - t
print('Time: {0: 3.1e}, Efficiency: {1: 3.1e} Gflops'.\
format(t, (3 * n * k) / t / 10 ** 9))
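# Rough storage accounting for the CSR format (an aside, not from the original
# notebook): a tridiagonal matrix stores ~3n values plus ~3n column indices plus
# an (n+1)-long row-pointer array, versus n*n entries for a dense matrix.
print('nnz = {0}, CSR bytes = {1}'.format(a.nnz, a.data.nbytes + a.indices.nbytes + a.indptr.nbytes))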
from IPython.display import HTML
HTML('<iframe src=http://yifanhu.net/GALLERY/GRAPHS/search.html width=700 height=450></iframe>')
#Indeed, the inverse is not sparse
n = 7
ex = np.ones(n);
a = sp.sparse.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csc');
b = np.array(np.linalg.inv(a.todense()))
print(a.todense())
print(b)
from scipy.sparse.linalg import splu
T = splu(a, permc_spec="NATURAL")
print(T.L.todense())
from scipy.sparse.linalg import splu
T = splu(a)
print(T.L.todense())
print(T.perm_c)
n = 20
ex = np.ones(n);
lp1 = sp.sparse.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csr');
e = sp.sparse.eye(n)
A = sp.sparse.kron(lp1, e) + sp.sparse.kron(e, lp1)
A = csc_matrix(A)
T = scipy.sparse.linalg.spilu(A)
plt.spy(T.L, marker='.', color='k', markersize=8)
import networkx as nx
n = 10
ex = np.ones(n);
lp1 = sp.sparse.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csr');
e = sp.sparse.eye(n)
A = sp.sparse.kron(lp1, e) + sp.sparse.kron(e, lp1)
A = csc_matrix(A)
G = nx.Graph(A)
nx.draw(G, pos=nx.spring_layout(G), node_size=10)
from IPython.core.display import HTML
def css_styling():
styles = open("./styles/custom.css", "r").read()
return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Creating your first neural network with TF-Slim
Step3: Let's create the model and examine its structure.
Step4: Let's create some 1d regression data.
Step5: Let's fit the model to the data
Step6: Training with multiple loss functions.
Step7: Let's load the saved model and use it for prediction.
Step8: Let's compute various evaluation metrics on the test set.
Step9: Reading Data with TF-Slim
Step10: Display some of the data.
Step11: Convolutional neural nets (CNNs).
Step12: Apply the model to some randomly generated images.
Step14: Train the model on the Flowers dataset.
Step15: Evaluate some metrics.
Step16: Using pre-trained models
Step17: Apply Pre-trained Inception V1 model to Images.
Step18: Download the VGG-16 checkpoint
Step19: Apply Pre-trained VGG-16 model to Images.
Step21: Fine-tune the model on a different set of labels.
Step22: Apply fine tuned model to some images.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
import math
import numpy as np
import tensorflow as tf
import time
from datasets import dataset_utils
# Main slim library
from tensorflow.contrib import slim
def regression_model(inputs, is_training=True, scope="deep_regression"):
    """Creates the regression model.

    Args:
        inputs: A node that yields a `Tensor` of size [batch_size, dimensions].
        is_training: Whether or not we're currently training the model.
        scope: An optional variable_op scope for the model.

    Returns:
        predictions: 1-D `Tensor` of shape [batch_size] of responses.
        end_points: A dict of end points representing the hidden layers.
    """
with tf.variable_scope(scope, 'deep_regression', [inputs]):
end_points = {}
# Set the default weight _regularizer and acvitation for each fully_connected layer.
with slim.arg_scope([slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(0.01)):
# Creates a fully connected layer from the inputs with 32 hidden units.
net = slim.fully_connected(inputs, 32, scope='fc1')
end_points['fc1'] = net
# Adds a dropout layer to prevent over-fitting.
net = slim.dropout(net, 0.8, is_training=is_training)
# Adds another fully connected layer with 16 hidden units.
net = slim.fully_connected(net, 16, scope='fc2')
end_points['fc2'] = net
# Creates a fully-connected layer with a single hidden unit. Note that the
# layer is made linear by setting activation_fn=None.
predictions = slim.fully_connected(net, 1, activation_fn=None, scope='prediction')
end_points['out'] = predictions
return predictions, end_points
with tf.Graph().as_default():
# Dummy placeholders for arbitrary number of 1d inputs and outputs
inputs = tf.placeholder(tf.float32, shape=(None, 1))
outputs = tf.placeholder(tf.float32, shape=(None, 1))
# Build model
predictions, end_points = regression_model(inputs)
# Print name and shape of each tensor.
print("Layers")
for k, v in end_points.items():
print('name = {}, shape = {}'.format(v.name, v.get_shape()))
# Print name and shape of parameter nodes (values not yet initialized)
print("\n")
print("Parameters")
for v in slim.get_model_variables():
print('name = {}, shape = {}'.format(v.name, v.get_shape()))
def produce_batch(batch_size, noise=0.3):
xs = np.random.random(size=[batch_size, 1]) * 10
ys = np.sin(xs) + 5 + np.random.normal(size=[batch_size, 1], scale=noise)
return [xs.astype(np.float32), ys.astype(np.float32)]
x_train, y_train = produce_batch(200)
x_test, y_test = produce_batch(200)
plt.scatter(x_train, y_train)
def convert_data_to_tensors(x, y):
inputs = tf.constant(x)
inputs.set_shape([None, 1])
outputs = tf.constant(y)
outputs.set_shape([None, 1])
return inputs, outputs
# The following snippet trains the regression model using a mean_squared_error loss.
ckpt_dir = '/tmp/regression_model/'
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.INFO)
inputs, targets = convert_data_to_tensors(x_train, y_train)
# Make the model.
predictions, nodes = regression_model(inputs, is_training=True)
# Add the loss function to the graph.
loss = tf.losses.mean_squared_error(labels=targets, predictions=predictions)
# The total loss is the user's loss plus any regularization losses.
total_loss = slim.losses.get_total_loss()
# Specify the optimizer and create the train op:
optimizer = tf.train.AdamOptimizer(learning_rate=0.005)
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run the training inside a session.
final_loss = slim.learning.train(
train_op,
logdir=ckpt_dir,
number_of_steps=5000,
save_summaries_secs=5,
log_every_n_steps=500)
print("Finished training. Last batch loss:", final_loss)
print("Checkpoint saved in %s" % ckpt_dir)
with tf.Graph().as_default():
inputs, targets = convert_data_to_tensors(x_train, y_train)
predictions, end_points = regression_model(inputs, is_training=True)
# Add multiple loss nodes.
mean_squared_error_loss = tf.losses.mean_squared_error(labels=targets, predictions=predictions)
absolute_difference_loss = slim.losses.absolute_difference(predictions, targets)
# The following two ways to compute the total loss are equivalent
regularization_loss = tf.add_n(slim.losses.get_regularization_losses())
total_loss1 = mean_squared_error_loss + absolute_difference_loss + regularization_loss
# Regularization Loss is included in the total loss by default.
# This is good for training, but not for testing.
total_loss2 = slim.losses.get_total_loss(add_regularization_losses=True)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op) # Will initialize the parameters with random weights.
total_loss1, total_loss2 = sess.run([total_loss1, total_loss2])
print('Total Loss1: %f' % total_loss1)
print('Total Loss2: %f' % total_loss2)
print('Regularization Losses:')
for loss in slim.losses.get_regularization_losses():
print(loss)
print('Loss Functions:')
for loss in slim.losses.get_losses():
print(loss)
with tf.Graph().as_default():
inputs, targets = convert_data_to_tensors(x_test, y_test)
# Create the model structure. (Parameters will be loaded below.)
predictions, end_points = regression_model(inputs, is_training=False)
# Make a session which restores the old parameters from a checkpoint.
sv = tf.train.Supervisor(logdir=ckpt_dir)
with sv.managed_session() as sess:
inputs, predictions, targets = sess.run([inputs, predictions, targets])
plt.scatter(inputs, targets, c='r');
plt.scatter(inputs, predictions, c='b');
plt.title('red=true, blue=predicted')
with tf.Graph().as_default():
inputs, targets = convert_data_to_tensors(x_test, y_test)
predictions, end_points = regression_model(inputs, is_training=False)
# Specify metrics to evaluate:
names_to_value_nodes, names_to_update_nodes = slim.metrics.aggregate_metric_map({
'Mean Squared Error': slim.metrics.streaming_mean_squared_error(predictions, targets),
'Mean Absolute Error': slim.metrics.streaming_mean_absolute_error(predictions, targets)
})
# Make a session which restores the old graph parameters, and then run eval.
sv = tf.train.Supervisor(logdir=ckpt_dir)
with sv.managed_session() as sess:
metric_values = slim.evaluation.evaluation(
sess,
num_evals=1, # Single pass over data
eval_op=names_to_update_nodes.values(),
final_op=names_to_value_nodes.values())
names_to_values = dict(zip(names_to_value_nodes.keys(), metric_values))
for key, value in names_to_values.items():
print('%s: %f' % (key, value))
import tensorflow as tf
from datasets import dataset_utils
url = "http://download.tensorflow.org/data/flowers.tar.gz"
flowers_data_dir = '/tmp/flowers'
if not tf.gfile.Exists(flowers_data_dir):
tf.gfile.MakeDirs(flowers_data_dir)
dataset_utils.download_and_uncompress_tarball(url, flowers_data_dir)
from datasets import flowers
import tensorflow as tf
from tensorflow.contrib import slim
with tf.Graph().as_default():
dataset = flowers.get_split('train', flowers_data_dir)
data_provider = slim.dataset_data_provider.DatasetDataProvider(
dataset, common_queue_capacity=32, common_queue_min=1)
image, label = data_provider.get(['image', 'label'])
with tf.Session() as sess:
with slim.queues.QueueRunners(sess):
for i in range(4):
np_image, np_label = sess.run([image, label])
height, width, _ = np_image.shape
class_name = name = dataset.labels_to_names[np_label]
plt.figure()
plt.imshow(np_image)
plt.title('%s, %d x %d' % (name, height, width))
plt.axis('off')
plt.show()
def my_cnn(images, num_classes, is_training): # is_training is not used...
with slim.arg_scope([slim.max_pool2d], kernel_size=[3, 3], stride=2):
net = slim.conv2d(images, 64, [5, 5])
net = slim.max_pool2d(net)
net = slim.conv2d(net, 64, [5, 5])
net = slim.max_pool2d(net)
net = slim.flatten(net)
net = slim.fully_connected(net, 192)
net = slim.fully_connected(net, num_classes, activation_fn=None)
return net
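# Note on slim.arg_scope above (a brief aside): both slim.max_pool2d calls
# implicitly receive kernel_size=[3, 3] and stride=2 from the enclosing
# arg_scope, which is why they need no explicit arguments.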
import tensorflow as tf
with tf.Graph().as_default():
# The model can handle any input size because the first layer is convolutional.
# The size of the model is determined when image_node is first passed into the my_cnn function.
# Once the variables are initialized, the size of all the weight matrices is fixed.
# Because of the fully connected layers, this means that all subsequent images must have the same
# input size as the first image.
batch_size, height, width, channels = 3, 28, 28, 3
images = tf.random_uniform([batch_size, height, width, channels], maxval=1)
# Create the model.
num_classes = 10
logits = my_cnn(images, num_classes, is_training=True)
probabilities = tf.nn.softmax(logits)
# Initialize all the variables (including parameters) randomly.
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
# Run the init_op, evaluate the model outputs and print the results:
sess.run(init_op)
probabilities = sess.run(probabilities)
print('Probabilities Shape:')
print(probabilities.shape) # batch_size x num_classes
print('\nProbabilities:')
print(probabilities)
print('\nSumming across all classes (Should equal 1):')
print(np.sum(probabilities, 1)) # Each row sums to 1
from preprocessing import inception_preprocessing
import tensorflow as tf
from tensorflow.contrib import slim
def load_batch(dataset, batch_size=32, height=299, width=299, is_training=False):
    """Loads a single batch of data.

    Args:
        dataset: The dataset to load.
        batch_size: The number of images in the batch.
        height: The size of each image after preprocessing.
        width: The size of each image after preprocessing.
        is_training: Whether or not we're currently training or evaluating.

    Returns:
        images: A Tensor of size [batch_size, height, width, 3], image samples that have been preprocessed.
        images_raw: A Tensor of size [batch_size, height, width, 3], image samples that can be used for visualization.
        labels: A Tensor of size [batch_size], whose values range between 0 and dataset.num_classes.
    """
data_provider = slim.dataset_data_provider.DatasetDataProvider(
dataset, common_queue_capacity=32,
common_queue_min=8)
image_raw, label = data_provider.get(['image', 'label'])
# Preprocess image for usage by Inception.
image = inception_preprocessing.preprocess_image(image_raw, height, width, is_training=is_training)
# Preprocess the image for display purposes.
image_raw = tf.expand_dims(image_raw, 0)
image_raw = tf.image.resize_images(image_raw, [height, width])
image_raw = tf.squeeze(image_raw)
# Batch it up.
images, images_raw, labels = tf.train.batch(
[image, image_raw, label],
batch_size=batch_size,
num_threads=1,
capacity=2 * batch_size)
return images, images_raw, labels
from datasets import flowers
# This might take a few minutes.
train_dir = '/tmp/tfslim_model/'
print('Will save model to %s' % train_dir)
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.INFO)
dataset = flowers.get_split('train', flowers_data_dir)
images, _, labels = load_batch(dataset)
# Create the model:
logits = my_cnn(images, num_classes=dataset.num_classes, is_training=True)
# Specify the loss function:
one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
slim.losses.softmax_cross_entropy(logits, one_hot_labels)
total_loss = slim.losses.get_total_loss()
# Create some summaries to visualize the training process:
tf.summary.scalar('losses/Total Loss', total_loss)
# Specify the optimizer and create the train op:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run the training:
final_loss = slim.learning.train(
train_op,
logdir=train_dir,
number_of_steps=1, # For speed, we just do 1 epoch
save_summaries_secs=1)
print('Finished training. Final batch loss %d' % final_loss)
from datasets import flowers
# This might take a few minutes.
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.DEBUG)
dataset = flowers.get_split('train', flowers_data_dir)
images, _, labels = load_batch(dataset)
logits = my_cnn(images, num_classes=dataset.num_classes, is_training=False)
predictions = tf.argmax(logits, 1)
# Define the metrics:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'eval/Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
'eval/Recall@5': slim.metrics.streaming_recall_at_k(logits, labels, 5),
})
print('Running evaluation Loop...')
checkpoint_path = tf.train.latest_checkpoint(train_dir)
metric_values = slim.evaluation.evaluate_once(
master='',
checkpoint_path=checkpoint_path,
logdir=train_dir,
eval_op=names_to_updates.values(),
final_op=names_to_values.values())
names_to_values = dict(zip(names_to_values.keys(), metric_values))
for name in names_to_values:
print('%s: %f' % (name, names_to_values[name]))
from datasets import dataset_utils
url = "http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz"
checkpoints_dir = '/tmp/checkpoints'
if not tf.gfile.Exists(checkpoints_dir):
tf.gfile.MakeDirs(checkpoints_dir)
dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)
import numpy as np
import os
import tensorflow as tf
try:
import urllib2 as urllib
except ImportError:
import urllib.request as urllib
from datasets import imagenet
from nets import inception
from preprocessing import inception_preprocessing
from tensorflow.contrib import slim
image_size = inception.inception_v1.default_image_size
with tf.Graph().as_default():
url = 'https://upload.wikimedia.org/wikipedia/commons/7/70/EnglishCockerSpaniel_simon.jpg'
image_string = urllib.urlopen(url).read()
image = tf.image.decode_jpeg(image_string, channels=3)
processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
processed_images = tf.expand_dims(processed_image, 0)
# Create the model, use the default arg scope to configure the batch norm parameters.
with slim.arg_scope(inception.inception_v1_arg_scope()):
logits, _ = inception.inception_v1(processed_images, num_classes=1001, is_training=False)
probabilities = tf.nn.softmax(logits)
init_fn = slim.assign_from_checkpoint_fn(
os.path.join(checkpoints_dir, 'inception_v1.ckpt'),
slim.get_model_variables('InceptionV1'))
with tf.Session() as sess:
init_fn(sess)
np_image, probabilities = sess.run([image, probabilities])
probabilities = probabilities[0, 0:]
sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])]
plt.figure()
plt.imshow(np_image.astype(np.uint8))
plt.axis('off')
plt.show()
names = imagenet.create_readable_names_for_imagenet_labels()
for i in range(5):
index = sorted_inds[i]
print('Probability %0.2f%% => [%s]' % (probabilities[index] * 100, names[index]))
from datasets import dataset_utils
import tensorflow as tf
url = "http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz"
checkpoints_dir = '/tmp/checkpoints'
if not tf.gfile.Exists(checkpoints_dir):
tf.gfile.MakeDirs(checkpoints_dir)
dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)
import numpy as np
import os
import tensorflow as tf
try:
    import urllib2 as urllib
except ImportError:
import urllib.request as urllib
from datasets import imagenet
from nets import vgg
from preprocessing import vgg_preprocessing
from tensorflow.contrib import slim
image_size = vgg.vgg_16.default_image_size
with tf.Graph().as_default():
url = 'https://upload.wikimedia.org/wikipedia/commons/d/d9/First_Student_IC_school_bus_202076.jpg'
image_string = urllib.urlopen(url).read()
image = tf.image.decode_jpeg(image_string, channels=3)
processed_image = vgg_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
processed_images = tf.expand_dims(processed_image, 0)
# Create the model, use the default arg scope to configure the batch norm parameters.
with slim.arg_scope(vgg.vgg_arg_scope()):
# 1000 classes instead of 1001.
logits, _ = vgg.vgg_16(processed_images, num_classes=1000, is_training=False)
probabilities = tf.nn.softmax(logits)
init_fn = slim.assign_from_checkpoint_fn(
os.path.join(checkpoints_dir, 'vgg_16.ckpt'),
slim.get_model_variables('vgg_16'))
with tf.Session() as sess:
init_fn(sess)
np_image, probabilities = sess.run([image, probabilities])
probabilities = probabilities[0, 0:]
sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])]
plt.figure()
plt.imshow(np_image.astype(np.uint8))
plt.axis('off')
plt.show()
names = imagenet.create_readable_names_for_imagenet_labels()
for i in range(5):
index = sorted_inds[i]
# Shift the index of a class name by one.
print('Probability %0.2f%% => [%s]' % (probabilities[index] * 100, names[index+1]))
# Note that this may take several minutes.
import os
from datasets import flowers
from nets import inception
from preprocessing import inception_preprocessing
from tensorflow.contrib import slim
image_size = inception.inception_v1.default_image_size
def get_init_fn():
    """Returns a function run by the chief worker to warm-start the training."""
checkpoint_exclude_scopes=["InceptionV1/Logits", "InceptionV1/AuxLogits"]
exclusions = [scope.strip() for scope in checkpoint_exclude_scopes]
variables_to_restore = []
for var in slim.get_model_variables():
excluded = False
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
excluded = True
break
if not excluded:
variables_to_restore.append(var)
return slim.assign_from_checkpoint_fn(
os.path.join(checkpoints_dir, 'inception_v1.ckpt'),
variables_to_restore)
train_dir = '/tmp/inception_finetuned/'
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.INFO)
dataset = flowers.get_split('train', flowers_data_dir)
images, _, labels = load_batch(dataset, height=image_size, width=image_size)
# Create the model, use the default arg scope to configure the batch norm parameters.
with slim.arg_scope(inception.inception_v1_arg_scope()):
logits, _ = inception.inception_v1(images, num_classes=dataset.num_classes, is_training=True)
# Specify the loss function:
one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
slim.losses.softmax_cross_entropy(logits, one_hot_labels)
total_loss = slim.losses.get_total_loss()
# Create some summaries to visualize the training process:
tf.summary.scalar('losses/Total Loss', total_loss)
# Specify the optimizer and create the train op:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run the training:
final_loss = slim.learning.train(
train_op,
logdir=train_dir,
init_fn=get_init_fn(),
number_of_steps=2)
print('Finished training. Last batch loss %f' % final_loss)
import numpy as np
import tensorflow as tf
from datasets import flowers
from nets import inception
from tensorflow.contrib import slim
image_size = inception.inception_v1.default_image_size
batch_size = 3
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.INFO)
dataset = flowers.get_split('train', flowers_data_dir)
images, images_raw, labels = load_batch(dataset, height=image_size, width=image_size)
# Create the model, use the default arg scope to configure the batch norm parameters.
with slim.arg_scope(inception.inception_v1_arg_scope()):
logits, _ = inception.inception_v1(images, num_classes=dataset.num_classes, is_training=True)
probabilities = tf.nn.softmax(logits)
checkpoint_path = tf.train.latest_checkpoint(train_dir)
init_fn = slim.assign_from_checkpoint_fn(
checkpoint_path,
slim.get_variables_to_restore())
with tf.Session() as sess:
with slim.queues.QueueRunners(sess):
sess.run(tf.initialize_local_variables())
init_fn(sess)
np_probabilities, np_images_raw, np_labels = sess.run([probabilities, images_raw, labels])
for i in range(batch_size):
image = np_images_raw[i, :, :, :]
true_label = np_labels[i]
predicted_label = np.argmax(np_probabilities[i, :])
predicted_name = dataset.labels_to_names[predicted_label]
true_name = dataset.labels_to_names[true_label]
plt.figure()
plt.imshow(image.astype(np.uint8))
plt.title('Ground Truth: [%s], Prediction [%s]' % (true_name, predicted_name))
plt.axis('off')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question 1a
Step2: Observe the best result. What is the best technique?
Step3: Values of the hyperparameter C
Step4: Question 2b
Step5: Question 2c
Step6: Question 2d
|
<ASSISTANT_TASK:>
Python Code:
# Load libraries
import numpy as np # Math
import scipy.io # Import data
import time
import sklearn.neighbors, sklearn.linear_model, sklearn.ensemble, sklearn.naive_bayes # Baseline classification techniques
import matplotlib.pyplot as plt
# Load 400 text documents representing 5 classes
# X_train matrix contains the training data
# y_train vector contains the training labels
# X_test matrix contains the test data
# y_test vector contains the test labels
[X_train, y_train, X_test, y_test] = np.load('datasets/20news_5classes_400docs.npy')
print('X_train size=',X_train.shape)
print('X_test size=',X_test.shape)
print('y_train size=',y_train.shape)
print('y_test size=',y_test.shape)
train_accuracy = YOUR CODE HERE
test_accuracy = YOUR CODE HERE
exec_time =YOUR CODE HERE
num_folds = 5
X_train = X_train.toarray() # for np.array_split
X_train_folds = np.array_split(YOUR CODE HERE)
y_train_folds = YOUR CODE HERE
C_choices = [1e-2, 5*1e-2, 1e-1, 5*1e-1, 1e0, 5*1e0, 1e1, 5*1e1, 1e2, 5*1e2, 1e3, 5*1e3]
num_Cs = len(C_choices)
accuracy_tab = np.zeros([num_folds,num_Cs])
for C_idx, C_value in enumerate(C_choices):
for fold_idx in range(num_folds):
# Extract train dataset for the current fold
fold_x_train = np.concatenate([X_train_folds[i] for i in range(num_folds) if i!=fold_idx])
fold_y_train = YOUR CODE HERE
# validation dataset for the current fold
fold_x_val = X_train_folds[fold_idx]
fold_y_val = YOUR CODE HERE
# Run Logistic Regression model for the current fold
accuracy = YOUR CODE HERE
# Store accuracy value
accuracy_tab[fold_idx,C_idx] = accuracy
print(accuracy_tab)
# plot the raw observations
for C_idx, C_value in enumerate(C_choices):
accuracies_C_idx = accuracy_tab[:,C_idx]
plt.scatter(YOUR CODE HERE)
# plot the trend line with error bars that correspond to standard deviation
accuracies_mean = YOUR CODE HERE
accuracies_std = YOUR CODE HERE
plt.errorbar(np.log(C_choices), accuracies_mean, yerr=accuracies_std)
# Add text
plt.title('Cross-validation on C')
plt.xlabel('log C')
plt.ylabel('Cross-validation accuracy')
plt.show()
idx_best_C = YOUR CODE HERE
accuracy_testset = YOUR CODE HERE
print('best accuracy=',accuracy_testset)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3.1 Load data
Step2: 3.2 Comparing estimators
Step3: The default estimator configured in Variogram is the Mathéron estimator (Mathéron, 1963). It is defined as $$\gamma(h) = \frac{1}{2 N(h)} \sum_{i=1}^{N(h)} \big(z(x_i) - z(x_i + h)\big)^2$$ where $N(h)$ is the number of point pairs separated by the lag $h$.
Step4: Following the histogram, we should set a maxlag. This property accepts a number $0 < maxlag < 1$ to set the maxlag to this ratio of the maximum separating distance. A number > 1 will use this at an absolute limit. You can also pass 'mean' or 'median'. This will calculate and set the mean or median of all distances in the distance matrix as maxlag.
Step5: 3.3 Alternative estimators
Step6: The important part here is that both the effective range and the sill change with the estimator. This will likely change the Kriging result.
Step7: It's hard to see this by eye, but there are actually some differences. Let's load the original field as well and see which estimator performed best.
|
<ASSISTANT_TASK:>
Python Code:
import skgstat as skg
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
s = [30, 80, 300]
data1 = skg.data.pancake(N=s[0], seed=42, as_dataframe=True).get('sample')
data2 = skg.data.pancake(N=s[1], seed=42, as_dataframe=True).get('sample')
data3 = skg.data.pancake(N=s[2], seed=42, as_dataframe=True).get('sample')
def plot_scatter(data, ax):
art = ax.scatter(data.x, data.y, 50, c=data.v, cmap='plasma')
plt.colorbar(art, ax=ax)
fig, axes = plt.subplots(1, 3, figsize=(18, 5))
for data, ax in zip((data1, data2, data3), axes.flatten()):
plot_scatter(data, ax)
# calculate all variograms, but use only the second one
V1 = skg.Variogram(data2[['x', 'y']].values, data2.v.values, normalize=False, n_lags=8, use_nugget=True)
V2 = skg.Variogram(data2[['x', 'y']].values, data2.v.values, normalize=False, n_lags=15, use_nugget=True)
V3 = skg.Variogram(data2[['x', 'y']].values, data2.v.values, normalize=False, n_lags=25, use_nugget=True)
vario = V2
vario.plot(show=False);
vario.maxlag = 'median'
vario.plot(show=False);
fig, _a = plt.subplots(1, 3, figsize=(12, 3), sharey=True)
axes = _a.flatten()
for ax, estimator_name in zip(axes, ('matheron', 'cressie', 'dowd')):
vario.estimator = estimator_name
vario.plot(axes=ax, hist=False, show=False)
ax.set_title(estimator_name.capitalize())
xx, yy = np.mgrid[100:199:100j, 0:99:100j]
fig, _a = plt.subplots(1, 3, figsize=(18, 6))
axes = _a.flatten()
fields = []
for ax, est in zip(axes, ('matheron', 'cressie', 'dowd')):
vario.estimator = est
ok = skg.OrdinaryKriging(vario, min_points=5, max_points=15, mode='exact')
field = ok.transform(xx.flatten(), yy.flatten()).reshape(xx.shape)
fields.append(field)
art = ax.matshow(field, origin='lower', cmap='plasma', vmin=vario.values.min(), vmax=vario.values.max())
plt.colorbar(art, ax=ax)
ax.set_title(est.capitalize())
pan = skg.data.pancake_field().get('sample')
# rescale to 100x100
# pan = zoom(pan, 0.2)
# select the same extend
pan = pan[100:200, 0:100]
fig, axes = plt.subplots(2, 2, figsize=(10,10), sharex=True, sharey=True)
# original
m = axes[0][0].matshow(pan, origin='lower', cmap='Reds_r')
plt.colorbar(m, ax=axes[0][0])
axes[0][0].set_title('Original')
# matheron
m = axes[0][1].matshow(np.abs(pan - fields[0]), origin='lower', cmap='YlGn_r', vmin=0, vmax=25)
plt.colorbar(m, ax=axes[0][1])
axes[0][1].set_title(f'|Original - Matheron|; Diff: {np.abs(pan - fields[0]).mean().round(1)}')
# cressie
m = axes[1][0].matshow(np.abs(pan - fields[1]), origin='lower', cmap='YlGn_r', vmin=0, vmax=25)
plt.colorbar(m, ax=axes[1][0])
axes[1][0].set_title(f'|Original - Cressie| Diff: {np.abs(pan - fields[1]).mean().round(1)}')
# dowd
m = axes[1][1].matshow(np.abs(pan - fields[2]), origin='lower', cmap='YlGn_r', vmin=0, vmax=25)
plt.colorbar(m, ax=axes[1][1])
axes[1][1].set_title(f'|Original - Dowd| Diff: {np.abs(pan - fields[2]).mean().round(1)}')
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Define the query to compute the histogram
Step3: Fetch the histogram data into a pandas dataframe
Step4: Histogram plotting
|
<ASSISTANT_TASK:>
Python Code:
# connect to PostgreSQL using psycopg2
# !pip install psycopg2-binary
import psycopg2
# Connect to an existing database and create the test table
with psycopg2.connect("dbname=postgres user=postgres host=localhost port=5432 password=mysecretpassword") as pg_conn:
cur = pg_conn.cursor()
# use this drop statement if you need to recreate the table
# cur.execute("DROP TABLE data")
cur.execute("CREATE TABLE data as select random()*100 random_value from generate_series(1, 100);")
table_name = "data" # table or temporary view containing the data
value_col = "random_value" # column name on which to compute the histogram
min = -20 # min: minimum value in the histogram
max = 90 # maximum value in the histogram
bins = 11 # number of histogram buckets to compute
step = (max - min) / bins
query = f"""
with hist as (
    select
        width_bucket({value_col}, {min}, {max}, {bins}) as bucket,
        count(*) as cnt
    from {table_name}
    group by bucket
),
buckets as (
    select generate_series as bucket from generate_series(1,{bins})
)
select
    bucket, {min} + (bucket - 0.5) * {step} as value,
    coalesce(cnt, 0) as count
from hist right outer join buckets using(bucket)
order by bucket
"""
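# How width_bucket behaves here (standard PostgreSQL semantics): with min=-20,
# max=90 and bins=11 each bucket spans 10 units; values below min map to
# bucket 0 and values >= max map to bucket 12, both of which fall outside the
# generated 1..11 series and are therefore dropped by the right outer join.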
import pandas as pd
# query Oracle using ora_conn and put the result into a pandas Dataframe
with psycopg2.connect("dbname=postgres user=postgres host=localhost port=5432 password=mysecretpassword") as pg_conn:
hist_pandasDF = pd.read_sql(query, con=pg_conn)
# Description
#
# bucket: the bucket number, range from 1 to bins (included)
# value: midpoint value of the given bucket
# count: number of values in the bucket
hist_pandasDF
# Optionally normalize the event count into a frequency
# dividing by the total number of events
hist_pandasDF["frequency"] = hist_pandasDF["count"] / sum(hist_pandasDF["count"])
hist_pandasDF
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
plt.rcParams.update({'font.size': 20, 'figure.figsize': [14,10]})
f, ax = plt.subplots()
# histogram data
x = hist_pandasDF["value"]
y = hist_pandasDF["count"]
# bar plot
ax.bar(x, y, width = 3.0, color='red')
ax.set_xlabel("Bucket values")
ax.set_ylabel("Event count")
ax.set_title("Distribution of event counts")
# Label for the resonances spectrum peaks
txt_opts = {'horizontalalignment': 'center',
'verticalalignment': 'center',
'transform': ax.transAxes}
plt.show()
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
plt.rcParams.update({'font.size': 20, 'figure.figsize': [14,10]})
f, ax = plt.subplots()
# histogram data
x = hist_pandasDF["value"]
y = hist_pandasDF["frequency"]
# bar plot
ax.bar(x, y, width = 3.0, color='blue')
ax.set_xlabel("Bucket values")
ax.set_ylabel("Event frequency")
ax.set_title("Distribution of event frequencies")
# Label for the resonances spectrum peaks
txt_opts = {'horizontalalignment': 'center',
'verticalalignment': 'center',
'transform': ax.transAxes}
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can categorize triangles into three categories based on the properties of their angles.
Step2: Distance between two points
Step3: Finding two-dimensional TDOA
Step4: Now write a function that takes in the four parameters above plus an additional one called $v$, the velocity of the wave source. Have this function return an array/list of length four with the time it takes for the wave to reach each of the sensors. Call it time_to_sensor.
Step5: Now we will write a method that returns the "time difference of arrival" (TDOA) relative to sensor one. This means that we shift our position in time such that the time it takes to reach sensor one is zero. We can do this by subtracting the time it takes to get to sensor one from every element in the return value of the time_to_sensor method.
Step6: Now we have successfully written a method that can return the theoretical TDOA of a wave source to our sensors! However, what we would ideally like is a way to identify the position $(x,y)$ given the time differences of arrival. The implementation for this is rather difficult, so we will not go into it. However, it is the same method that seismologists use to locate an earthquake, and what a GPS might use to determine your location!
|
<ASSISTANT_TASK:>
Python Code:
#Write your code here
#Solution
def isValidTriangle(arg_1, arg_2, arg_3):
if(arg_1 + arg_2 + arg_3 == 180):
print "YES"
else:
print "NO"
#Write your functions here
import math
#Solutions
def find_hypotenuse(a, b):
return math.sqrt(a*a+b*b)
def find_leg(a,b):
if(a > b):
return math.sqrt(a*a-b*b)
else:
return math.sqrt(b*b-a*a)
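#Quick check with a 3-4-5 right triangle (example values, not part of the exercise):
#find_hypotenuse(3, 4) -> 5.0 and find_leg(5, 3) -> 4.0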
#Write your function here
#Solution
def distance(x1,y1,x2,y2):
return math.sqrt((x2-x1)*(x2-x1) + (y2-y1)*(y2-y1))
#Write your function below
#Solution
def distance_to_sensor(a,b,x,y):
distance_one = distance(0,0,x,y)
distance_two = distance(a,0,x,y)
distance_three = distance(0,b,x,y)
distance_four = distance(a,b,x,y)
return [distance_one, distance_two, distance_three, distance_four]
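#Example (made-up numbers): with sensors on the corners of a unit square and the
#source at the centre, distance_to_sensor(1, 1, 0.5, 0.5) returns four equal
#distances of sqrt(0.5) ~ 0.707.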
#Write your function below
#Soluton
def time_to_sensor(a,b,x,y,v):
distances = distance_to_sensor(a,b,x,y)
time_one = distances[0]/v;
time_two = distances[1]/v;
time_three = distances[2]/v;
time_four = distances[3]/v;
#return map(lambda x: x/v, distances)
return [time_one, time_two, time_three, time_four]
#Write your function below
#Soluton
def TDOA(a,b,x,y,v):
times = time_to_sensor(a,b,x,y,v)
offset = times[0];
return map(lambda x: x-offset, times)
#definition of cost function
def err(TDOA1, TDOA2):
ans = 0
for i in range(1,4):
ans += (TDOA1[i] - TDOA2[i])*(TDOA1[i] - TDOA2[i])
return ans
#Write your method below
#Solution
def find_point(a, time_differences, v):
currentX = -1
currentY = -1
min_error = 100000000
x = 0.0
while(x <= a):
y = 0.0
while(y <= a):
error = err(time_differences, TDOA(a,a,x,y,v))
if(error < min_error):
min_error = error
currentX = x
currentY = y
y = y+a/100.0
x = x+a/100.0
return (currentX,currentY)
print find_point(1, [0, 0, .618033, .618033], 1)
print TDOA(1,1, .5, 0, 1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncc', 'noresm2-mm', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
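# Example of the fill-in pattern used throughout this notebook (illustrative
# value taken from the choice list above): DOC.set_value("troposhere")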
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_aod')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Drop NaN Values
Step2: Drop rows with all NaN values
Step3: Drop rows with NaN values in the x column.
Step4: Keep only rows with at least 3 non-NaN values
Step5: Drop columns with all NaN values
Step6: Drop columns with any NaN values in row 3.
Step7: Fill NaN values
Step8: Replace all NaN values with the first non-NaN value above in column
Step9: Replace all NaN values with the first non-NaN value below in column
Step10: Replace at most 2 NaN values with the first non-NaN value below in column
Step11: Replace all NaN values with the first non-NaN value to the left in the row
Step12: Replace all NaN values with the first non-NaN value to the right in the row
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from plydata import call
df = pd.DataFrame({
'w': [1, 2, np.nan, 4, 5],
'x': [np.nan, 2, np.nan, 4, 5],
'y': [np.nan] * 4 + [5],
'z': [np.nan] * 5
})
df
df >> call(pd.DataFrame.dropna)
# You can call a method on the dataframe with '.method_name'
df >> call('.dropna')
df >> call('.dropna', how='all')
df >> call('.dropna', subset=['x'])
df >> call('.dropna', thresh=3)
df >> call('.dropna', axis=1, how='all')
df >> call('.dropna', axis=1, subset=[3])
df >> call(pd.DataFrame.fillna, -1)
df >> call('.fillna', -1)
df >> call('.fillna', method='ffill')
df >> call('.fillna', method='bfill')
df >> call('.fillna', method='bfill', limit=2)
df >> call('.fillna', method='ffill', axis=1)
df >> call('.fillna', method='bfill', axis=1)
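
# A further sketch: call also accepts computed arguments, here filling each
# column's NaNs with that column's mean (plain pandas fillna semantics):
df >> call('.fillna', df.mean())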
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The core of the numpy package is the array class. Let's examine that first. We can make an array out of a sequence, like a list.
Step2: data types
Step3: You can query the datatype by examining the dtype attribute of the array.
Step4: Array types may be defined explicity in the call
Step5: Complex numbers are noted with a lowercase j or uppercase J, like this
Step6: As we have seen before, arrays are like multidimensional sequences. We can create a 2D array by supplying a list of lists as the argument.
Step7: Array attributes
Step8: Setting array shape
Step9: Singleton dimensions add to the dimensionality of an array. The last example was a 1D array (also called a vector), the next are 2D arrays.
Step10: Array indexing
Step11: Exercise
Step12: Mean takes the optional argument axis that can be used to take the mean along a single axis of the array. Just like with indexing, the axes are reference in a zero-based system; axis=0 means the first dimension.
Step13: In this case, there are two rows in the first dimension, and arr.mean(axis=0) takes the average in the 'row' direction, resulting in a 1D array that is the average across the rows.
Step14: You can find the mininum and maximum of an array with the min and max methods. Sometimes it is useful to find the indices of these minima and maxima. For this use argmin and argmax, like
Step15: Creating standard arrays
Step16: You can also create these arrays with the same shape and datatype of the input array using np.ones_like and np.zeros_like.
Step17: You can also create a diagonal array with a given vector along the diagonal. These can be offset with an optional argument k (default=0). This example creates a tri-diagonal array similar to that used for finite difference calculations
Step18: There are also a number of ways to generate sequences of numbers.
Step19: You can create arrays of random numbers easily with methods in np.random.
Step20: Exercise
Step21: There are a number of convenience functions that act like concatenate for specific axes
Step22: Likewise, arrays can be split with np.split or np.array_split. There are also convenience functions to split horizontally, vertically, and with depth.
Step23: Exercise
Step24: The boolean array can be used as an index to other arrays. Note this will return a 1D array, no matter what dimension the origial arrays are, because there is no way to know what structure the True values have.
Step25: To get the indices of the places where the conditional is true (i.e., the locations of the True values in the boolean array), use the np.where command.
Step26: Note that np.where always returns a tuple of indices for each dimension. This is a little strange for 1D arrays, but is done for consistency across all input values. Often, you will want to explicitly pull out the (single) array of indices from the tuple, like
Step27: What happens when the [0] behind the call to where is missing?
Step28: Normally, this will not be a problem, but if you need to make sure that a subset of an array has its own memory, make a copy of the array, like
Step29: Array broadcasting
Step30: The term broadcasting describes how numpy treats arrays with different shapes during arithmetic operations. Subject to certain constraints, the smaller array is “broadcast” across the larger array so that they have compatible shapes. Broadcasting provides a means of vectorizing array operations so that looping occurs in C instead of Python. It does this without making needless copies of data and usually leads to efficient algorithm implementations.
Step31: The result is equivalent to the previous example where b was an array. We can think of the scalar b being stretched during the arithmetic operation into an array with the same shape as a. The new elements in b are simply copies of the original scalar. The stretching analogy is only conceptual. NumPy is smart enough to use the original scalar value without actually making copies, so that broadcasting operations are as memory and computationally efficient as possible.
Step32: Here are examples of shapes that do not broadcast
Step33: Exercise
Step34: the flat attribute on the other hand gives a view of the array in 1D. It looks like an iterator object (like range and zip). This allows indexed assignment into the original array through the flattened view
Step35: In contrast, this does not work as expected. WHY?
Step36: Other operations can be done to the array first. For example, we can take a transpose of the array before we flatten it.
Step37: Here, the T attribute (equivalent to the a.transpose() method) gives a view of the array transposed (similar to MATLAB's tick notation).
Step38: Masked arrays
Step39: The mask can also be supplied explicity when creating the masked array,
Step40: Importing data
Step41: Exercise
Step42: Let's plot it to make sure this makes sense
Step43: Once you have the fit, you can use it to find other useful things, like the value of the fitted line at $x=1$
Step44: You can also use the np.polynomial.Polynomial class to work with polynomials. Note, these define polynomials the opposite way, with the lowest order first. The Polynomial class gives an excellent example of operator overloading, and the flexibility of classes.
Step45: You can use the Polynomial object to evaluate the value of the polynomial at various input values
Step46: We can use this to make a plot to see the function
Step47: Other things we can do
Step48: Vectorization
Step49: The better way is to directly multiply the arrays together, taking advantage of C code that Python has in the background.
Step50: Basic performance evaluation
Step51: %time runs the line once and gives the time required. However, calculation times vary depending on many things including the numbers involved and the state of your computer at the moment. In this case, the %timeit function can be used to perform the test a number of times to get an average calculation time.
Step52: For statements that are longer than a single line, the time.time function can be used.
Step53: Exercise
Step54: Matrix multiplication is done using the np.dot function. In this case, matrices do not need to be the same shape, but must follow the rules of matrix multiplication. E.g., the operation dot(<4x5 array>, <5x12 array>) results in a 4x12 array; i.e., the inner dimensions must match (technically last and second-to-last, for arrays with more than two dimensions).
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
d = [1, 2, 3, 4, 5]
np.array(d)
d = [1, 2, 3.1415, 4, 5]
np.array(d)
d = [1, 2, 3.1415, 4, 5]
arr = np.array(d)
arr.dtype
arr = np.array([1, 2, 3, 4, 5], dtype='float32')
arr
cmplx = np.array([1.0+2.0j, 3.0+4.0J])
print(cmplx)
cmplx.dtype
arr = np.array([[1., 2., 3.,], [4., 5., 6.]])
arr
arr.size # The number of elements in the array
arr.shape # The shape of the array (i.e., the size of each dimension)
arr.ndim # The number of dimensions of the array
arr.shape = (3, 2)
arr
arr.shape = (6,)
arr
arr.shape = (1, 6)
arr # Note that there are *two* square brackets in the output sequence. This is a row vector.
arr.shape = (6, 1)
arr # this is also a 2D array, like a column vector
arr = np.arange(60).reshape(6, 10)
arr
arr[:, 4] # the 5th column
arr[2, :] # the 3rd row
arr[2] # Trailing colons do not need to be explicitly typed. This is equivalent to the last example.
arr[4, 7] # an individual element in the table
arr = np.array([[1., 2., 3.,], [4., 5., 6.]]) # reset the array to our 2x3 array.
arr.mean() # The mean of all of the elements in the array
arr.mean(axis=0) # The mean along axis 0 (down the rows), giving one value per column
arr
arr.reshape(3, 2)
x = np.random.rand(10)
imax = x.argmax()
print(imax, x[imax], x.max())
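
# For multi-dimensional arrays, argmax returns an index into the *flattened*
# array; np.unravel_index converts it back to a tuple of coordinates:
m = np.random.rand(3, 4)
print(np.unravel_index(m.argmax(), m.shape))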
o = np.ones((3, 4, 5)) # The argument is a shape, so is a tuple with the length of each dimension as an argument
b = np.ones((2, 3), dtype=bool)  # the np.bool alias was removed in newer NumPy; the builtin bool works
z = np.zeros((2, 3), dtype=np.float32)
b
zb = np.zeros_like(b)
zb
np.diag(-2*np.ones(6)) + np.diag(np.ones(5), k=-1) + np.diag(np.ones(5), k=1)
np.arange(10.)
np.arange(2, 10, 2)
np.linspace(2, 4, 17)
np.random.rand(2, 4)
np.random.randint(1, 50, (2, 4))
x = np.random.rand(4, 5, 6)
y = np.random.rand(4, 5, 6)
print(np.concatenate((x, y)).shape)
print()
print(np.concatenate((x, y), axis=0).shape)
print(np.concatenate((x, y), axis=1).shape)
print(np.concatenate((x, y), axis=2).shape)
print(np.vstack((x, y)).shape)
print(np.hstack((x, y)).shape)
print(np.dstack((x, y)).shape)
x = np.random.rand(12, 2, 5)
[a.shape for a in np.split(x, 4, axis=0)]
x = np.random.rand(5, 5)
print(x)
ind = x > 0.5
print(ind)
x = np.random.rand(5, 5)
y = np.sin(x)
y[x > 0.5]
# or, equivalently, as two lines
idx = x > 0.5
y[idx]
x = np.random.rand(5, 5)
idx = np.where(x > 0.5)
idx
x = np.random.rand(10)
idx = np.where(x>0.5)[0]
print(idx)
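
# where also has a three-argument form that selects elementwise between two
# alternatives -- here keeping values above 0.5 and zeroing the rest:
print(np.where(x > 0.5, x, 0.0))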
a = np.arange(10.0)
b = a[::2]
print(a)
print(b)
a[4] = -999 # this will modify b as well...
print(a)
print(b)
b[-1] = -888 # this will modify a as well...
print(a)
print(b)
a = np.arange(10.0)
b = a.copy()[::2] # or np.copy(a)
a[4] = -999 # this will NOT modify b now
print(a)
print(b)
a = np.array([1.0, 2.0, 3.0])
b = np.array([2.0, 2.0, 2.0])
a * b
a = np.array([1.0, 2.0, 3.0])
b = 2.0
a * b
A = np.random.rand(15, 3, 5)
B = np.random.rand(3, 1)
print(A.shape, B.shape)
Result = A * B
print(Result.shape)
A = np.random.rand(5, 4)
B = np.random.rand(5)
A*B  # raises ValueError: trailing dimensions 4 and 5 do not match, so the shapes cannot broadcast
print(B.shape)
print(B[:,np.newaxis].shape)
(A*B[:,np.newaxis]).shape
a = np.arange(12).reshape(3, 4)
print(a)
b = a.flatten()
print(b)
a.flat[6] = -999
print(a)
a.flatten()[5] = -888
print(a)
a.T.flat[6] = -998
print(a)
print(a.T)
arr = np.random.randn(7, 8)
cond = arr > 0.1 # `cond` is True for the random values greater than 0.1
marr = np.ma.masked_where(cond, arr)
print(marr)
marr.mean(axis=0)
marr = np.ma.masked_array([1, 2, 3, 4, 5], mask=[True, True, False, False, True])
marr
data = np.loadtxt('../data/CTD.txt', comments='*')
data[:,1] # a column of data representing temperature
import numpy as np
x = np.random.rand(100)
y = 5 + 3*x + 0.1*np.random.randn(100) # A straight line with some noise
p = np.polyfit(x, y, 1) # fit a straight line (order is 1)
print(p) # The coefficients of the polynomial, with highest order first (i.e., [slope, intercept])
import matplotlib.pyplot as plt
%matplotlib inline
# plot data
plt.plot(x, y, '.')
# plot fitted line
plt.plot(x, p[0]*x + p[1])
# plt.legend(('Data', 'Fitted line'))
np.polyval(p, 1)
p1 = np.polynomial.Polynomial([5, 3]) # y = 5 + 3 x
p2 = np.polynomial.Polynomial([3, 6, 8, 2]) # y = 3 + 6 x + 8 x**2 + 2 x**3
print('Evaluation')
print('p1(0.0) = ', p1(0))
print('p2(5.0) = ', p2(5))
x = np.linspace(0,10)
plt.plot(x, p1(x), x, p2(x))
plt.legend(['p1', 'p2'])
print('Roots')
print('Roots of p2 = ', p2.roots())
print()
print('Operations')
print('p1 + p2 = ', p1 + p2)
print('p1 * p2 = ', p1 * p2)
print()
print('Calculus')
print('Derivative of p1', p1.deriv(1))
print('Integral of p2', p2.integ(4, k=[4, 3, 2, 1]))
a = np.arange(102400.0).reshape(4, 8, 1600, 2) # a 4D array using sequential numbers
b = np.random.rand(4, 8, 1600, 2) # a 4D array using random numbers
li, lj, lk, lm = b.shape # size of b in each dimension
sol = np.zeros(b.shape)
for i in range(li):
for j in range(lj):
for k in range(lk):
for m in range(lm):
sol[i,j,k,m] = a[i,j,k,m]*b[i,j,k,m]
print(sol.sum())
sol = a * b # element-by-element multiplication. This operation is about as fast as it can be on your computer.
print(sol.sum())
b = np.random.randn(5000, 2000)
%time u, s, v = np.linalg.svd(b)
%timeit b = np.random.randn(50, 20); u, s, v = np.linalg.svd(b)
import time
t_start = time.time()
time.sleep(0.25) # Do nothing for 0.25 seconds
t_stop = time.time()
print('{:6.4f} seconds have passed.'.format(t_stop-t_start))
a = np.random.randn(100, 100)
e, v = np.linalg.eig(a)
b = np.random.randn(500, 200)
u, s, v = np.linalg.svd(b)
x = np.random.rand(4, 5)
y = np.random.rand(5, 12)
res = np.dot(x, y)
print(res.shape)
# np.dot(y, x) # This gives an error -- order is important.
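
# The same product with the @ matrix-multiplication operator (Python 3.5+),
# which is equivalent to np.dot for 2D arrays:
res2 = x @ y
print(np.allclose(res, res2))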
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Numerical stability, dispersion and anisotropy of the 2D acoustic finite difference modelling code
Step2: To separate modelling and visualization of the results, we introduce the following plotting function
Step3: Numerical Grid Dispersion
Step4: $N_\lambda = 7.5$ gridpoints per minimum wavelength. Let's increase it to $N_\lambda = 12$, which yields ...
Step5: ... an improved fit of the 2D analytical by the FD solution.
|
<ASSISTANT_TASK:>
Python Code:
# Execute this cell to load the notebook's style sheet, then ignore it
from IPython.core.display import HTML
css_file = '../../style/custom.css'
HTML(open(css_file, "r").read())
# Import Libraries
# ----------------
import numpy as np
from numba import jit
import matplotlib
import matplotlib.pyplot as plt
from pylab import rcParams
# Ignore Warning Messages
# -----------------------
import warnings
warnings.filterwarnings("ignore")
# Definition of modelling parameters
# ----------------------------------
xmax = 5000.0 # maximum spatial extension of the 1D model in x-direction (m)
zmax = xmax # maximum spatial extension of the 1D model in z-direction (m)
dx = 10.0 # grid point distance in x-direction (m)
dz = dx # grid point distance in z-direction (m)
tmax = 0.8 # maximum recording time of the seismogram (s)
dt = 0.0010 # time step
vp0 = 3000. # P-wave speed in medium (m/s)
# acquisition geometry
xr = 2000.0 # x-receiver position (m)
zr = xr # z-receiver position (m)
xsrc = 2500.0 # x-source position (m)
zsrc = xsrc # z-source position (m)
f0 = 20. # dominant frequency of the source (Hz)
t0 = 4. / f0 # source time shift (s)
# FD_2D_acoustic code with JIT optimization
# -----------------------------------------
@jit(nopython=True) # use Just-In-Time (JIT) Compilation for C-performance
def FD_2D_acoustic_JIT(dt,dx,dz,f0):
# define model discretization
# ---------------------------
nx = (int)(xmax/dx) # number of grid points in x-direction
print('nx = ',nx)
    nz = (int)(zmax/dz) # number of grid points in z-direction
print('nz = ',nz)
nt = (int)(tmax/dt) # maximum number of time steps
print('nt = ',nt)
ir = (int)(xr/dx) # receiver location in grid in x-direction
jr = (int)(zr/dz) # receiver location in grid in z-direction
isrc = (int)(xsrc/dx) # source location in grid in x-direction
    jsrc = (int)(zsrc/dz) # source location in grid in z-direction
# Source time function (Gaussian)
# -------------------------------
src = np.zeros(nt + 1)
time = np.linspace(0 * dt, nt * dt, nt)
# 1st derivative of a Gaussian
src = -2. * (time - t0) * (f0 ** 2) * (np.exp(- (f0 ** 2) * (time - t0) ** 2))
# Analytical solution
# -------------------
G = time * 0.
# Initialize coordinates
# ----------------------
x = np.arange(nx)
x = x * dx # coordinates in x-direction (m)
z = np.arange(nz)
z = z * dz # coordinates in z-direction (m)
# calculate source-receiver distance
r = np.sqrt((x[ir] - x[isrc])**2 + (z[jr] - z[jsrc])**2)
for it in range(nt): # Calculate Green's function (Heaviside function)
if (time[it] - r / vp0) >= 0:
G[it] = 1. / (2 * np.pi * vp0**2) * (1. / np.sqrt(time[it]**2 - (r/vp0)**2))
Gc = np.convolve(G, src * dt)
Gc = Gc[0:nt]
# Initialize model (assume homogeneous model)
# -------------------------------------------
vp = np.zeros((nx,nz))
vp2 = np.zeros((nx,nz))
vp = vp + vp0 # initialize wave velocity in model
vp2 = vp**2
# Initialize empty pressure arrays
# --------------------------------
p = np.zeros((nx,nz)) # p at time n (now)
pold = np.zeros((nx,nz)) # p at time n-1 (past)
pnew = np.zeros((nx,nz)) # p at time n+1 (present)
d2px = np.zeros((nx,nz)) # 2nd spatial x-derivative of p
d2pz = np.zeros((nx,nz)) # 2nd spatial z-derivative of p
# Initialize empty seismogram
# ---------------------------
seis = np.zeros(nt)
# Calculate Partial Derivatives
# -----------------------------
for it in range(nt):
# FD approximation of spatial derivative by 3 point operator
for i in range(1, nx - 1):
for j in range(1, nz - 1):
d2px[i,j] = (p[i + 1,j] - 2 * p[i,j] + p[i - 1,j]) / dx**2
d2pz[i,j] = (p[i,j + 1] - 2 * p[i,j] + p[i,j - 1]) / dz**2
# Time Extrapolation
# ------------------
pnew = 2 * p - pold + vp2 * dt**2 * (d2px + d2pz)
# Add Source Term at isrc
# -----------------------
# Absolute pressure w.r.t analytical solution
pnew[isrc,jsrc] = pnew[isrc,jsrc] + src[it] / (dx * dz) * dt ** 2
# Remap Time Levels
# -----------------
pold, p = p, pnew
# Output of Seismogram
# -----------------
seis[it] = p[ir,jr]
return time, seis, Gc, p # return last pressure wave field snapshot
# Compare FD Seismogram with analytical solution
# ----------------------------------------------
def plot_seis(time,seis_FD,seis_analy):
# Define figure size
rcParams['figure.figsize'] = 12, 5
plt.plot(time, seis_FD, 'b-',lw=3,label="FD solution") # plot FD seismogram
plt.plot(time, seis_analy,'r--',lw=3,label="Analytical solution") # plot analytical solution
plt.xlim(time[0], time[-1])
plt.title('Seismogram')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.legend()
plt.grid()
plt.show()
dx = 10.0 # grid point distance in x-direction (m)
dz = dx # grid point distance in z-direction (m)
f0 = 20 # centre frequency of the source wavelet (Hz)
# define zeta for the CFL criterion
zeta = np.sqrt(2)
# calculate dt according to the CFL-criterion
dt = dx / (zeta * vp0)
%time time, seis_FD, seis_analy, p = FD_2D_acoustic_JIT(dt,dx,dz,f0)
plot_seis(time,seis_FD,seis_analy)
fmax = 2 * f0
N_lam = vp0 / (dx * fmax)
print("N_lam = ",N_lam)
N_lam = 12
dx = vp0 / (N_lam * fmax)
dz = dx # grid point distance in z-direction (m)
f0 = 20 # centre frequency of the source wavelet (Hz)
# define zeta for the CFL criterion
zeta = np.sqrt(2)
# calculate dt according to the CFL-criterion
dt = dx / (zeta * vp0)
%time time, seis_FD, seis_analy, p = FD_2D_acoustic_JIT(dt,dx,dz,f0)
plot_seis(time,seis_FD,seis_analy)
# define dx/dz and calculate dt according to the CFL-criterion
dx = 10.0 # grid point distance in x-direction (m)
dz = dx # grid point distance in z-direction (m)
# define zeta for the CFL criterion
zeta = np.sqrt(2)
dt = dx / (zeta * vp0)
f0 = 100
time, seis_FD, seis_analy, p = FD_2D_acoustic_JIT(dt,dx,dz,f0)
# Plot last pressure wavefield snapshot at Tmax = 0.8 s
# -----------------------------------------------------
rcParams['figure.figsize'] = 8, 8 # define figure size
clip = 1e-7 # image clipping
extent = [0.0, xmax/1000, 0.0, zmax/1000]
# Plot wavefield snapshot at tmax = 0.8 s
plt.imshow(p.T,interpolation='none',cmap='seismic',vmin=-clip,vmax=clip,extent=extent)
plt.title('Numerical anisotropy')
plt.xlabel('x (km)')
plt.ylabel('z (km)')
plt.show()
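
# A small helper (sketch) that reports the two discretization numbers used
# above: the CFL number eps = vp0*dt*sqrt(1/dx**2 + 1/dz**2), which must not
# exceed 1 for this scheme, and the grid points per minimum wavelength N_lam.
def check_discretization(vp0, dx, dz, dt, fmax):
    eps = vp0 * dt * np.sqrt(1.0/dx**2 + 1.0/dz**2)
    N_lam = vp0 / (dx * fmax)
    print('CFL number eps = %.3f (stable if <= 1)' % eps)
    print('Grid points per minimum wavelength N_lam = %.2f' % N_lam)
    return eps, N_lam

check_discretization(vp0, dx, dz, dt, 2*f0)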
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Is TOC increasing?
Step3: So there is an increasing overall trend (as we often find in our real data).
Step4: In general, the slopes seem to decrease through time, but is this change significant?
Step6: So there is evidence for a significant decrease in the rate of increase of TOC through time.
|
<ASSISTANT_TASK:>
Python Code:
# Imports (the original notebook loads these in an earlier cell)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import theilslopes
import icpw  # project helper exposing mk_test (Mann-Kendall); import path assumed
# Fake data
# Rising TOC, but *rise rate* decreasing through time
x = np.arange(1990, 2013)
y = np.log(1.5*(x-1989)) + np.random.uniform(low=0, high=1, size=len(x))
df = pd.DataFrame({'TOC':y}, index=x)
df.plot()
# M-K test
res = icpw.mk_test(df['TOC'].values, 'test_stn', 'TOC')
print('M-K test results: %s (p=%.4f)' % (res[4], res[3]))
# Sen's slope
slp, intcp, lo, hi = theilslopes(y, x)
# Plot
y2 = slp*x + intcp
plt.plot(x, y)
plt.plot(x, y2)
# Function for moving window
def sens_slp(df):
    """Calculate Sen's slope estimate."""
from scipy.stats import theilslopes
res = theilslopes(df)
return res[0]
# Calc rolling slopes
df2 = df.rolling(center=False, window=12).apply(func=sens_slp)
df2.plot()
plt.title('12 year windowed slopes (indexed by last year in each window)')
plt.ylabel('Slope')
# Drop Nan
df2.dropna(inplace=True)
# M-K test
res = icpw.mk_test(df2['TOC'].values, 'test_stn', 'TOC')
print('M-K test results: %s (p=%.4f)' % (res[4], res[3]))
# Sen's slope
slp, intcp, lo, hi = theilslopes(df2['TOC'], df2.index)
# Plot
y3 = slp*df2.index + intcp
plt.plot(df2.index, df2['TOC'])
plt.plot(df2.index, y3)
def sens_intcp(df):
    """Get intercept to go with slope estimate."""
from scipy.stats import theilslopes
res = theilslopes(df)
return res[1]
# Get intercepts
incpts = df.rolling(center=False, window=12).apply(func=sens_intcp)  # same 12 year window as the slopes above
incpts.columns = ['c']
df2 = df2.join(incpts)
df2
fig = plt.figure(figsize=(15, 15))
# Plot the fitted line for each 12 year window
for yr in range(2001, 2013):
    xi = np.arange(yr-11, yr+1)
    slp = df2.loc[yr, 'TOC']
    inc = df2.loc[yr, 'c']
yi = slp*(xi-yr+11) + inc
plt.plot(xi, yi, c='k', alpha=0.5)
# Original data
plt.plot(x, y, lw=5)
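
# theilslopes also returns a confidence interval on the slope (95% by
# scipy's default alpha) -- a quick check (sketch) of whether it excludes
# zero for the windowed-slope series:
sen_slp, sen_intcp, sen_lo, sen_hi = theilslopes(df2['TOC'], df2.index)
print('Sen slope: %.4f (95%% CI: %.4f to %.4f)' % (sen_slp, sen_lo, sen_hi))
print('CI excludes zero: %s' % ((sen_lo > 0) or (sen_hi < 0)))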
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Dataset
Step2: The data contains the following features
Step3: Most numerical data appear to be fairly complete, with the exception of fare (which only has one missing value) and age (which has 263 missing values). We can deal with the missing values later.
Step4: A fairly obvious observation here is that the PassengerId variable is not very useful -- we should drop this column. The rest of the data is quite interesting, with most passengers being somewhat young (around 20 to 30 years of age) and most people traveling without too much family.
Step5: Data Preprocessing
Step6: Feature Engineering
Step7: Let's tabulate our titles against sex to see the frequency of the various titles.
Step8: We see that with the exception of Master, Mr, Miss, and Mrs, the other titles are relatively rare. We can group rare titles together to simplify our analysis. Also note that Mlle and Ms are synonymous with Miss, and Mme is synonymous with Mrs.
Step9: We have 875 unique surnames.
Step10: The chart above clearly shows an interesting phenomenon -- single people and families of over 4 people have a significantly lower chance of survival than those in small (2 to 4 person) families.
Step11: Dealing with Missing Values
Step12: It appears that age, cabin, embarked, and fare have missing values. Let's first work on "Embarked" and "Fare" given that there are few enough NaN's for us to be able to manually work out what values they should have. For Cabin, given that there are 1309 samples and more than 75% of them are missing, we can probably just drop this column. It might have been useful given that location on the ship might influence their chance of survival, but data is too sparse on this particular attribute.
Step13: Miss Icard and Mrs. Stone, both shared the same cabin, both survived, both paid the same fare, and are both of the same class, interestingly enough. Mr. Storey is of the third class and embarked from Southampton.
Step14: The closest value to the $80 fare paid by both ladies for first class is very close to the mean fare paid by first class passengers embarking from Southampton, but also aligns very nicely with the median fare paid by those embarking from Cherbourg. Perhaps a swarm plot will better show how passengers are distributed.
Step15: This is a tough call. Looking at the spread of the points, however, it seems that those that embarked from Southampton generally paid lower fares. It appears that the mean fare paid by those from Cherbourg is pulled up by the extreme outliers that paid more than \$500 for their tickets, with a majority of first class passengers indeed paying around $80. As such, we classify the two ladies as having embarked from Cherbourg (C).
Step16: The swarm plot also shows that the passengers embarking from Southampton in third class have paid around the same fare. It would be reasonable to use the mean value of third class passengers from Southampton as his fare value.
Step17: We could do MICE imputation, similar to Megan's notebook, via the fancyimpute package.
Step18: We see that the MICE'd data has no more missing Age values. Plotting these values in the histogram
Step19: Most age values were added around the 20 to 30 year-old age range, which makes sense given the distribution of the ages in the data that we had. Note that the fancyimpute version of MICE uses Bayesian Ridge Regression. The density is not perfectly preserved but is useful enough to proceed with the analysis.
Step20: We can create some additional categorical columns based on our complete age feature -- whether the person is a child (under 18) and whether a person is a mother (female, over 18, with children, and does not have the title "Miss").
Step21: Let's take a look at the dataset once again.
Step22: We ensure that all important categorical variables are dummy coded.
Step23: After we split the data back into training and test sets, our data set will be ready to use for modeling.
|
<ASSISTANT_TASK:>
Python Code:
# Import necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, cross_val_score  # sklearn.cross_validation was renamed in newer scikit-learn
from sklearn import metrics
train = pd.read_csv("train.csv", index_col='PassengerId')
test = pd.read_csv("test.csv", index_col='PassengerId')
train.head(3)
test.head(3)
# print(train.shape)
# print(test.shape)
print('Number of features: {}'.format(test.shape[1]))
print('Training samples: {}'.format(train.shape[0]))
print('Test samples: {}'.format(test.shape[0]))
print('Total number of samples: {}'.format(train.shape[0]+test.shape[0]))
# First, combine datasets
total = pd.concat([train, test])
# View summary statistics
total.describe()
# Generate histograms
sns.set_color_codes('muted')
total.hist(color='g')
plt.tight_layout()
plt.show()
totalwithoutnas = total.dropna()
scattermatrix = sns.pairplot(totalwithoutnas)
plt.show()
total.drop('Ticket', axis=1, inplace=True)
Surnames = pd.DataFrame(total['Name'].str.split(",").tolist(), columns=['Surname', 'Rest'])
Titles = pd.DataFrame(Surnames['Rest'].str.split(".").tolist(), columns=['Title', 'Rest1', 'Rest2'])
Surnames.drop('Rest',axis=1,inplace=True)
Titles = pd.DataFrame(Titles['Title'])
Surnames['Surname'] = Surnames['Surname'].str.strip()
Titles['Title'] = Titles['Title'].str.strip()
total['Surname'] = Surnames.set_index(np.arange(1,1310))['Surname']
total['Title'] = Titles.set_index(np.arange(1,1310))['Title']
total.head()
pd.crosstab(total['Sex'], total['Title'])
raretitles = ['Dona', 'Lady', 'the Countess','Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer']
total.loc[total['Title'].str.contains('Mlle|Ms|Miss'), 'Title'] = 'Miss'
total.loc[total['Title'].str.contains('Mme|Mrs'), 'Title'] = 'Mrs'
total.loc[total['Title'].str.contains('|'.join(raretitles)), 'Title'] = 'Rare Title'
pd.crosstab(total['Sex'], total['Title'])
total['Surname'].nunique()
total['FamilySize'] = total['SibSp'] + total['Parch'] + 1
total['Family'] = total['Surname'] + "_" + total['FamilySize'].apply(str)
total.head(1)
# Plot family size
famsizebarplot = sns.countplot(x=total['FamilySize'].loc[1:len(train.index)],
                               hue=total['Survived'].loc[1:len(train.index)])
famsizebarplot.set_xlabel('Family Size')
plt.show()
# Categorize family size
total['FamSizeCat'] = 'small'
total.loc[(total['FamilySize'] == 1), 'FamSizeCat'] = 'singleton'
total.loc[(total['FamilySize'] > 4), 'FamSizeCat'] = 'large'
# Create mosaic plot
# To be done in the future using statsmodel
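
# A sketch of that mosaic plot using statsmodels (assumes the training rows
# are PassengerIds 1-891, as elsewhere in this notebook):
from statsmodels.graphics.mosaicplot import mosaic
mosaic(total.loc[1:891, :], ['FamSizeCat', 'Survived'])
plt.show()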
total.isnull().sum()
total[(total['Embarked'].isnull()) | (total['Fare'].isnull())]
sns.boxplot(x='Embarked',y='Fare',data=train.dropna(),hue='Pclass')
plt.tight_layout()
plt.show()
trainwithoutnas = train.dropna()
print("Mean fares for passengers traveling in first class:")
print(trainwithoutnas[trainwithoutnas['Pclass']==1].groupby('Embarked')['Fare'].mean())
print("\nMedian fares for passengers traveling in first class:")
print(trainwithoutnas[trainwithoutnas['Pclass']==1].groupby('Embarked')['Fare'].median())
sns.swarmplot(x='Embarked',y='Fare',data=train.dropna(),hue='Pclass')
plt.show()
total.loc[[62, 830], 'Embarked'] = "C"
total.loc[[62, 830], 'Embarked']
total.loc[1044,'Fare'] = total[(total['Embarked']=="S") & (total['Pclass']==3)]['Fare'].mean()
total.loc[1044, ['Name','Fare']]
AgeHistogram = total['Age'].hist(bins=20, edgecolor="black")
AgeHistogram.set_xlabel("Age")
AgeHistogram.set_ylabel("Count")
AgeHistogram.set_title("Age (Prior to Missing Value Imputation)")
plt.show()
import fancyimpute
total.isnull().sum()
totalforMICE = total.drop(['Survived','Cabin','FamSizeCat','Family','Name','Surname'], axis=1)
# totalforMICE.fillna(np.nan)
totalforMICE['Sex'] = pd.get_dummies(totalforMICE['Sex'])['male']
dummycodedTitles = pd.get_dummies(totalforMICE['Title']).drop('Rare Title', axis=1)
totalforMICE = pd.merge(totalforMICE, dummycodedTitles, left_index=True, right_index=True, how='outer')
totalforMICE = totalforMICE.drop(['Title'],axis=1)
dummycodedEmbarked = pd.get_dummies(totalforMICE['Embarked'])[['C','Q']]
totalforMICE = totalforMICE.join(dummycodedEmbarked).drop(['Embarked'],axis=1)
dummycodedPclass = pd.get_dummies(totalforMICE['Pclass']).drop(3, axis=1)  # drop class 3 as the reference level
totalforMICE = totalforMICE.join(dummycodedPclass).drop('Pclass',axis=1)
MICEdtotal = fancyimpute.MICE().complete(totalforMICE.values.astype(float))
MICEdtotal = pd.DataFrame(MICEdtotal, columns=totalforMICE.columns)
MICEdtotal.isnull().sum()
MICEAgeHistogram = MICEdtotal['Age'].hist(bins=20, edgecolor="black")
MICEAgeHistogram.set_xlabel("Age")
MICEAgeHistogram.set_ylabel("Count")
MICEAgeHistogram.set_title("Age (After Missing Value Imputation)")
plt.show()
AgeHists, AgeHistAxes = plt.subplots(nrows=1,ncols=2, figsize=(10,5), sharey=True)
AgeHistAxes[0].hist(total['Age'].dropna(), bins=20, edgecolor='black', normed=True)
AgeHistAxes[0].set_xlabel("Age")
AgeHistAxes[0].set_ylabel("Density")
AgeHistAxes[0].set_title("Age Density (Original Data)")
AgeHistAxes[1].hist(MICEdtotal['Age'], bins=20, edgecolor='black', normed=True)
AgeHistAxes[1].set_xlabel("Age")
AgeHistAxes[1].set_ylabel("Density")
AgeHistAxes[1].set_title("Age Density (After MICE)")
AgeHists.tight_layout()
AgeHists
newtotal = total
newtotal['Age'] = MICEdtotal['Age'].values  # .values avoids index misalignment (MICEdtotal has a 0-based index)
AgeandSexHist = sns.FacetGrid(newtotal.iloc[0:891,:], col = 'Sex', hue='Survived', size=5)
# AgeandSexHist.map(sns.distplot, 'Age', kde=False, hist_kws={'edgecolor':'black','stacked':True})
AgeandSexHist.map(plt.hist, 'Age', alpha=0.5, bins=20)
AgeandSexHist.add_legend()
# plt.close('all')
plt.show(AgeandSexHist)
AgeandSexHist, AgeandSexHistAxes = plt.subplots(nrows=1,ncols=2, figsize=(10,5), sharey=True)
AgeandSexHistAxes[0].hist([newtotal.loc[0:891, 'Age'].loc[(newtotal['Sex']=='male') & (newtotal['Survived']==1)],
newtotal.loc[0:891, 'Age'].loc[(newtotal['Sex']=='male') & (newtotal['Survived']==0)]],stacked=True, edgecolor='black', label=['Survived','Did Not Survive'], bins=24)
AgeandSexHistAxes[1].hist([newtotal.loc[0:891, 'Age'].loc[(newtotal['Sex']=='female') & (newtotal['Survived']==1)],
newtotal.loc[0:891, 'Age'].loc[(newtotal['Sex']=='female') & (newtotal['Survived']==0)]],stacked=True, edgecolor='black', bins=24)
AgeandSexHistAxes[0].set_title('Survival By Age for Males')
AgeandSexHistAxes[1].set_title('Survival By Age for Females')
for i in range(2):
AgeandSexHistAxes[i].set_xlabel('Age')
AgeandSexHistAxes[0].set_ylabel('Count')
AgeandSexHistAxes[0].legend()
plt.show()
# Create the 'Child' variable
newtotal['Child'] = 1
newtotal.loc[newtotal['Age']>=18, 'Child'] = 0
pd.crosstab(newtotal['Child'],newtotal['Survived'])
# Create the 'Mother' variable
newtotal['Mother'] = 0
newtotal.loc[(newtotal['Sex']=='female') & (newtotal['Parch'] > 0) & (newtotal['Age']>18) & (newtotal['Title'] != "Miss"), 'Mother'] = 1
pd.crosstab(newtotal['Mother'], newtotal['Survived'])
newtotal.head()
newtotal.shape
dummycodedFamSizeCat = pd.get_dummies(newtotal['FamSizeCat']).drop('large',axis=1)
newtotal = newtotal.drop(['Title','Embarked','Pclass', 'Cabin', 'Name', 'Family', 'Surname'], axis=1)
newtotal['Sex'] = pd.get_dummies(newtotal['Sex'])['male']
newtotal = newtotal.join(dummycodedEmbarked)
newtotal = newtotal.join(dummycodedPclass)
newtotal = newtotal.join(dummycodedTitles)
newtotal = newtotal.join(dummycodedFamSizeCat)
newtotal.head()
newtrain = newtotal.loc[:891,:]
newtest = newtotal.loc[892:,:]
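
# A minimal modeling sketch using the classifiers imported at the top of the
# notebook (hyperparameters are illustrative, not tuned):
X = newtrain.drop('Survived', axis=1)
y = newtrain['Survived']
rf = RandomForestClassifier(n_estimators=100, random_state=42)
scores = cross_val_score(rf, X, y, cv=5, scoring='accuracy')
print('CV accuracy: %.3f +/- %.3f' % (scores.mean(), scores.std()))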
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code::
sns.pairplot(tips, hue='sex', palette='coolwarm')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Demonstration of the cumulative partial reconstruction of the primitive "tiles" of the synthetic image above. Each primitive tile is displayed, reconstructing the iDFT from only the values F(u,0) and F(-u,0), for u between 0 and M-1.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from numpy.fft import fft2
from numpy.fft import ifft2
import sys,os
ia898path = os.path.abspath('/etc/jupyterhub/ia898_1s2017/')
if ia898path not in sys.path:
sys.path.append(ia898path)
import ia898.src as ia
f = 50 * np.ones((128,128))
f[:, : 32] = 200
f[:,64+32: ] = 200
plt.imshow(f,cmap='gray')
plt.title('Original image')
plt.colorbar()
plt.show()
H,W = f.shape
N = W
rows = W//2 + 1  # one subplot row per frequency pair, plus one for the DC term
plt.figure(figsize=[4,rows*2])
#1) Compute F = DFT(f) - the Discrete Fourier Transform;
F = fft2(f)
E = ia.dftview(F)
ia.adshow(E, title='DFT')
#2) Create a zeroed Faux with the same dtype and shape as F. In this Faux, first set Faux[0,0] = F[0,0] and compute the inverse of Faux.
Faux = np.zeros_like(F)
Faux[0,0] = F[0,0]
plt.subplot(rows,2,1)
plt.imshow(np.real(ifft2(Faux)), cmap='gray');
plt.title("DFT inverse (u=0)")
Fsma = np.zeros_like(F)
Fsma = Fsma + Faux
plt.subplot(rows,2,2)
plt.imshow(np.real(ifft2(Fsma)),cmap='gray')
plt.title("Acumulative (u=%s)" % 0)
#3) Repeat with u ranging from 1 to N/2: also copy F[0,u] and F[0,-u] and compute the inverse. Remember that -u = N-u, since F is periodic.
# This way the gradual reconstruction of the image is shown, adding more and more cosine waves.
# The individual cosine waves that are summed up gradually are also displayed.
row_count = 2;
for u in range(1,N//2):
Faux = np.zeros_like(F)
Faux[:,u] = F[:,u]
Faux[:,N-u] = F[:,N-u] #-u = N-u
row_count = row_count + 1;
plt.subplot(rows,2,row_count)
plt.imshow(np.real(ifft2(Faux)), cmap='gray');
plt.title("DFT inverse (u=%s)" % u)
#print('\nFaux: \n', Faux)
row_count = row_count + 1;
Fsma = Fsma + Faux
plt.subplot(rows,2,row_count)
plt.imshow(np.real(ifft2(Fsma)),cmap='gray')
plt.title("Acumulative (u=%s)" % u)
#print('\nFsma: \n', Fsma)
plt.tight_layout()
plt.show()
diff = np.abs(np.abs(ifft2(Fsma)) - f).sum() # compare the original and accumulated images
print('Difference between original image and reconstructed: ', diff, " (almost zero)")
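
# A sketch tracking the reconstruction error as frequency pairs accumulate
# (recomputed independently of the loop above):
Facc = np.zeros_like(F)
errors = []
for u in range(N//2):
    if u == 0:
        Facc[0,0] = F[0,0]
    else:
        Facc[:,u] = F[:,u]
        Facc[:,N-u] = F[:,N-u]
    errors.append(np.abs(np.real(ifft2(Facc)) - f).sum())
plt.plot(errors)
plt.xlabel('number of accumulated frequency pairs')
plt.ylabel('sum of absolute reconstruction error')
plt.show()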
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Training the network
Step9: Check out your predictions
Step10: Thinking about your results
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
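
# A small helper (sketch) to undo the standardization above for any scaled
# column, using the saved (mean, std) pairs:
def unscale(values, feature):
    mean, std = scaled_features[feature]
    return values * std + mean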
# Save the last 21 days
test_data = data[-21*24:]
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes ** -0.5,
(self.hidden_nodes, self.input_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes ** -0.5,
(self.output_nodes, self.hidden_nodes))
self.lr = learning_rate
#### Set this to your implemented sigmoid function ####
# Activation function is the sigmoid function
self.activation_function = lambda x : 1/(1+np.exp(-x))
        self.activation_function_derivative = lambda x: x * (1 - x)  # sigmoid derivative written in terms of the sigmoid output
def train(self, inputs_list, targets_list):
# Convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)
final_outputs = final_inputs
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
output_errors = targets - final_outputs # Output layer error is the difference between desired target and actual output.
output_grad = 1 # hidden layer gradients
# TODO: Backpropagated error
hidden_errors = np.dot(self.weights_hidden_to_output.T, output_errors) # errors propagated to the hidden layer
hidden_grad = self.activation_function_derivative(hidden_outputs) # hidden layer gradients
# TODO: Update the weights
self.weights_hidden_to_output += self.lr * np.dot(output_errors * output_grad, hidden_outputs.T) # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * np.dot(hidden_errors * hidden_grad, inputs.T) # update input-to-hidden weights with gradient descent step
def run(self, inputs_list):
# Run a forward pass through the network
inputs = np.array(inputs_list, ndmin=2).T
#### Implement the forward pass here ####
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)
final_outputs = final_inputs
return final_outputs
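# Quick numeric check (illustrative) of the derivative convention used above:
# for s = sigmoid(z), d(sigmoid)/dz = s * (1 - s), so the derivative lambda
# must be fed the sigmoid *output*, not the raw input z.
sig = lambda z: 1 / (1 + np.exp(-z))
z0, eps = 0.3, 1e-6
numeric = (sig(z0 + eps) - sig(z0 - eps)) / (2 * eps)
print(np.isclose(numeric, sig(z0) * (1 - sig(z0))))  # expect: True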
def MSE(y, Y):
return np.mean((y-Y)**2)
import sys
### Set the hyperparameters here ###
epochs = 2000
learning_rate = 0.01
hidden_nodes = 23
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for e in range(epochs):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
for record, target in zip(train_features.loc[batch].values,
train_targets.loc[batch]['cnt']):
network.train(record, target)
# Printing out the training progress
train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
plt.ylim(ymax=0.5)
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.loc[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
import unittest
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
test_w_i_h = np.array([[0.1, 0.4, -0.3],
[-0.2, 0.5, 0.2]])
test_w_h_o = np.array([[0.3, -0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328, -0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, 0.39775194, -0.29887597],
[-0.20185996, 0.50074398, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'csir-csiro', 'vresm-1-0', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
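# Illustrative pattern only (hypothetical values -- substitute your model's
# actual fluxes), e.g.:
# DOC.set_value("water")
# DOC.set_value("energy")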
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If "Remarks" contains "star", the source is classified as a star.
Step2: The distribution of galaxies looks similar to that of the HST COSMOS catalog, but the star distribution has a peak at i-mag$\sim$22 that does not appear in the HSTxPS1 catalog.
Step3: ROC curve and Accuracy
Step4: Accuracy vs. MAG with DEIMOSxHST
Step5: Cross-matching the sources in the DEIMOS catalog within radius = 0.5 arcsec around those in the HST catalog
Step6: The distribution of the distance has a peak at 0.1 arcsec. Change the cross-matching radius to 0.3 arcsec.
Step7: Remove duplicated sources.
Step8: Remove the sources that cannot be classified as star or galaxy by the DEIMOS catalog.
Step9: Accuracy vs. MAG
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
%matplotlib inline
_df = pd.read_table('DEIMOS/deimos_10K_March2018/deimos.tbl', header=None)
arr = np.empty((len(_df), len(_df.iloc[0][0].split())), dtype='<U50')
for i in range(len(_df)):
i_row = [k for k in _df.iloc[i][0].split(' ') if k not in ('', ' ')]
for j in range(len(_df.iloc[0][0].split())):
arr[i][j] = i_row[j]
df = pd.DataFrame(arr)
ra = np.array(df[1], dtype=float)
dec = np.array(df[2], dtype=float)
sel = np.array(df[3], dtype=int)
imag = np.array(df[4].replace('null', '-999').replace(' null', '-999'), dtype=float)
kmag = np.array(df[5].replace('null', '-999').replace(' null', '-999'), dtype=float)
zspec = np.array(df[6].replace('null', '-999').replace(' null', '-999'), dtype=float)
Qflag = np.array(df[7].replace('null', '-999').replace(' null', '-999'), dtype=int)
Q = np.array(df[8].replace('null', '-999').replace(' null', '-999'), dtype=float)
np.array(df[9][0:20])
sgFlag = np.empty(len(df), dtype=int)
for i in range(len(df[9])):
if 'star' in df[9][i]:
sgFlag[i] = 1 # star
elif 'null' in df[9][i]:
sgFlag[i] = -999 # null
else:
sgFlag[i] = 0 # galaxy
plt.hist(imag[sgFlag!=-999], bins=np.arange(15, 28, 0.2), color='0.8', label='All')
plt.hist(imag[sgFlag==0], bins=np.arange(15, 28, 0.2), alpha=0.5, label='GALAXY')
plt.hist(imag[sgFlag==1], bins=np.arange(15, 28, 0.2), alpha=0.5, label='STAR')
plt.yscale('log')
plt.xlabel('i mag'); plt.ylabel('#')
plt.legend(loc='best')
plt.show()
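# Illustrative tally of the three labels (galaxy=0, star=1, null=-999):
print({label: int(np.sum(sgFlag == label)) for label in (0, 1, -999)})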
df = pd.DataFrame()
df['ra'] = ra; df['dec'] = dec
df['sel'] = sel
df['imag'] = imag; df['kmag'] = kmag
df['zspec'] = zspec
df['Qflag'] = Qflag; df['Q'] = Q
df['class'] = sgFlag
df[0:10]
df.to_csv('./DEIMOS/DEIMOS.csv', index=None)
import star_galaxy_models
rf_obj = star_galaxy_models.RandomForestModel()
rf_obj.read_rf_from_pickle()
features = ['wwpsfChiSq', 'wwExtNSigma', 'wwpsfLikelihood',
'wwPSFKronRatio', 'wwPSFKronDist', 'wwPSFApRatio',
'wwmomentRH', 'wwmomentXX', 'wwmomentXY', 'wwmomentYY',
'wwKronRad']
from sklearn.metrics import roc_curve, accuracy_score, auc, make_scorer
ps1_dei = pd.read_csv('./DEIMOS/PS1_DEIMOS_features.csv').drop_duplicates(subset='objid')
print("PS1xDEIMOS catalog contains %i sources."%len(ps1_dei))
ps1_dei_det_mask = np.logical_and(ps1_dei['class'] != -999, (ps1_dei.nDetections>0)&(ps1_dei.wwKronFlux>0))
ps1_dei = ps1_dei[ps1_dei_det_mask]
print("%i sources are classified by both of the DEIMOS and the ML model."%len(ps1_dei))
ps1_df = pd.read_csv('./DEIMOS/HST_COSMOS_features.csv')
dupl_mask = np.empty(len(ps1_dei), dtype=bool)
for i in range(len(dupl_mask)):
dupl_mask[i] = ps1_dei.objid.iloc[i] in np.array(ps1_df.objid)
print("Only %i sources are included in both the PS1xDEIMOS and the PS1xHST catalogs..."%np.sum(dupl_mask))
ps1_dei = ps1_dei[~dupl_mask]
#print("%i sources are not contained in PS1xHST catalog."%len(ps1_dei))
kron_mag = -2.5*np.log10(ps1_dei.wwKronFlux/3631)
ps1_dei_features = ps1_dei[features]
ps1_dei_class = ps1_dei['class']
ps1_dei_score = rf_obj.rf_clf_.predict_proba(ps1_dei_features)
ps1_dei_pred = rf_obj.rf_clf_.predict(ps1_dei_features)
print("Overall accuracy of the classification by the ML model is %f"%accuracy_score(ps1_dei_class, ps1_dei_pred))
fpr, tpr, thre = roc_curve(ps1_dei_class, ps1_dei_score[:,1])
plt.grid(linestyle='dotted')
plt.plot(fpr, tpr, 'k-')
#plt.xscale('log'); plt.yscale('log')
plt.xlim(1e-3, 1e-1); plt.ylim(0.1, 1.01)
plt.xlabel('FPR'); plt.ylabel('TPR')
plt.show()
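# auc was imported above but not yet used; an area-under-curve summary of the
# ROC computed in this cell (illustrative):
print("AUC = %.4f" % auc(fpr, tpr))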
ps1_dei_class = np.array(ps1_dei_class)
ps1_dei_score = np.array(ps1_dei_score)
kron_mag = np.array(kron_mag)
binwidth = 1.5
Nboot = 100
mag_array = np.arange(14 , 23+binwidth, binwidth)
kron_mag = np.array(-2.5*np.log10(ps1_dei['wwKronFlux']/3631))
ml_acc_arr = np.zeros_like(mag_array, dtype=float)
ml_boot_scatt = np.vstack((np.zeros_like(mag_array, dtype=float), np.zeros_like(mag_array, dtype=float)))
for bin_num, binedge in enumerate(mag_array):
bin_sources = np.where((kron_mag >= binedge) & (kron_mag < binedge + binwidth))
ml_acc_arr[bin_num] = accuracy_score(ps1_dei_class[bin_sources],
ps1_dei_pred[bin_sources])
ml_boot_acc = np.empty(Nboot)
for i in range(Nboot):
boot_sources = np.random.choice(bin_sources[0], len(bin_sources[0]),
replace=True)
ml_boot_acc[i] = accuracy_score(ps1_dei_class[boot_sources],
ps1_dei_pred[boot_sources])
ml_boot_scatt[:,bin_num] = np.percentile(ml_boot_acc, [16, 84])
from sklearn.neighbors import KernelDensity
kde_grid = np.linspace(10,26,200)
deimos_stars = np.where(ps1_dei_class == 1)
deimos_gal = np.where(ps1_dei_class == 0)
deimos_kde_gal_norm = len(deimos_gal[0])/len(ps1_dei_class)
deimos_kde_star_norm = 1 - deimos_kde_gal_norm
# bandwidth: Silverman's rule-of-thumb, ~1.06 * sigma * n**(-1/5)
kde_deimos = KernelDensity(bandwidth=1.059*np.std(kron_mag, ddof=1)*len(kron_mag)**(-0.2),
rtol=1E-4)
kde_deimos.fit(kron_mag[:, np.newaxis])
kde_deimos_stars = KernelDensity(bandwidth=1.059*np.std(kron_mag[deimos_stars], ddof=1)*len(kron_mag[deimos_stars])**(-0.2),
rtol=1E-4)
kde_deimos_stars.fit(kron_mag[deimos_stars[0], np.newaxis])
kde_deimos_gal = KernelDensity(bandwidth=1.059*np.std(kron_mag[deimos_gal], ddof=1)*len(kron_mag[deimos_gal])**(-0.2),
rtol=1E-4)
kde_deimos_gal.fit(kron_mag[deimos_gal[0], np.newaxis])
pdf_deimos = np.exp(kde_deimos.score_samples(kde_grid[:, np.newaxis]))
pdf_deimos_stars = np.exp(kde_deimos_stars.score_samples(kde_grid[:, np.newaxis]))
pdf_deimos_gal = np.exp(kde_deimos_gal.score_samples(kde_grid[:, np.newaxis]))
from matplotlib.ticker import MultipleLocator
#import seaborn as sns
color_dict = {'ml': "black"}
mag_bin_centers = mag_array + binwidth/2
#cmap_star = sns.cubehelix_palette(rot=0.5, light=0.7,dark=0.3,as_cmap=True)
#cmap_gal = sns.cubehelix_palette(start=0.3,rot=-0.5,light=0.7,dark=0.3,as_cmap=True)
fig, ax = plt.subplots(figsize=(8, 5))
ax.grid(linestyle='dotted', zorder=1)
ax.errorbar(mag_bin_centers, ml_acc_arr,
yerr=np.abs(ml_boot_scatt - ml_acc_arr),
ls='-', lw=.75, fmt='o',
color=color_dict['ml'], label="ML model",
linewidth=1.5, markersize=7.5, zorder=5)
# add KDE plots
ax.fill(kde_grid, pdf_deimos + 0.5, alpha=0.4, color="0.7", zorder=2)
ax.fill(kde_grid, pdf_deimos_gal*deimos_kde_gal_norm + 0.5, alpha=0.7, zorder=3)#, color=cmap_gal(0.25))
ax.fill(kde_grid, pdf_deimos_stars*deimos_kde_star_norm + 0.5, alpha=0.7, zorder=4)#, color=cmap_star(0.25))
ax.set_ylim(0.5,1.01)
ax.set_xlim(14, 24)
ax.tick_params(which="both", top=True, right=True, labelsize=15)
ax.set_xlabel('whiteKronMag', fontsize=15)
ax.set_ylabel('Accuracy', fontsize=15)
ax.yaxis.set_minor_locator(MultipleLocator(0.025))
ax.xaxis.set_major_locator(MultipleLocator(2))
ax.xaxis.set_minor_locator(MultipleLocator(0.5))
#ax.legend(bbox_to_anchor=(0.01, 0.3, 1., 0.102), loc=3, fontsize=13)
fig.subplots_adjust(top=0.98,right=0.98,left=0.1,bottom=0.12)
from astropy.table import Table
deimos = pd.read_csv('./DEIMOS/DEIMOS.csv')
hst = Table.read('./DEIMOS/HST_COSMOS.fit').to_pandas()
hstX = np.empty((len(hst), 2), dtype=np.float64)
hstX[:, 0] = hst['ALPHA_J2000']
hstX[:, 1] = hst['DELTA_J2000']
deiX = np.empty((len(deimos), 2), dtype=np.float64)
deiX[:, 0] = deimos['ra']
deiX[:, 1] = deimos['dec']
from astroML.crossmatch import crossmatch_angular
max_radius = 0.5 / 3600 # 0.5 arcsec
dist, ind = crossmatch_angular(hstX, deiX, max_radius)
match = ~np.isinf(dist)
print("The number of sources cross-matched is %i"%np.sum(match))
plt.hist(dist[match]*3600, bins=np.arange(0, 0.5,0.01))
plt.xlabel('Distance')
plt.show()
from astroML.crossmatch import crossmatch_angular
max_radius = 0.3 / 3600 # 0.3 arcsec
dist, ind = crossmatch_angular(hstX, deiX, max_radius)
match = ~np.isinf(dist)
print("The number of sources cross-matched is %i"%np.sum(match))
plt.hist(dist[match]*3600, bins=np.arange(0, 0.5,0.01))
plt.xlabel('Distance')
plt.show()
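# Illustrative follow-up: fraction of HST sources with a DEIMOS counterpart
# within the 0.3 arcsec radius.
print("Match fraction: %.4f" % (np.sum(match) / len(match)))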
hst_match = hst[match]
deimos_match = deimos.loc[ind[match]]
dupl_mask = deimos_match.duplicated('ra')
deimos_match_uniq = deimos_match[~dupl_mask.values]
hst_match_uniq = hst_match[~dupl_mask.values]
good_mask = deimos_match_uniq["class"] != -999
deimos_match_uniq_good = deimos_match_uniq[good_mask.values]
hst_match_uniq_good = hst_match_uniq[good_mask.values]
print("The number of sources used to verify the classification accuracy is %i"%len(deimos_match_uniq_good))
xlims = [12, 29]
ylims = [12, 29]
plt.hexbin(hst_match_uniq["MAG_BEST"], deimos_match_uniq["imag"],
extent=[xlims[0], xlims[1], ylims[0], ylims[1]],
bins='log', cmap='viridis')
plt.xlim(xlims); plt.ylim(ylims)
plt.xlabel('MAG_BEST(HST)')
plt.ylabel('imag(DEIMOS)')
from sklearn.metrics import accuracy_score
print("The overall accuracy of the classification of the HST catalog is %0.4f"\
%accuracy_score(deimos_match_uniq_good["class"], hst_match_uniq_good["MU_CLASS"]-1))
dei_class = np.array(deimos_match_uniq_good["class"], dtype=int)
hst_class = np.array(hst_match_uniq_good["MU_CLASS"]-1, dtype=int)
kron_mag = np.array(hst_match_uniq_good["MAG_BEST"])
binwidth = 1
Nboot = 100
mag_array = np.arange(14 , 26+binwidth, binwidth)
ml_acc_arr = np.zeros_like(mag_array, dtype=float)
ml_boot_scatt = np.vstack((np.zeros_like(mag_array, dtype=float), np.zeros_like(mag_array, dtype=float)))
for bin_num, binedge in enumerate(mag_array):
bin_sources = np.where((kron_mag >= binedge) & (kron_mag < binedge + binwidth))
ml_acc_arr[bin_num] = accuracy_score(dei_class[bin_sources],
hst_class[bin_sources])
ml_boot_acc = np.empty(Nboot)
for i in range(Nboot):
boot_sources = np.random.choice(bin_sources[0], len(bin_sources[0]),
replace=True)
ml_boot_acc[i] = accuracy_score(dei_class[boot_sources],
hst_class[boot_sources])
ml_boot_scatt[:,bin_num] = np.percentile(ml_boot_acc, [16, 84])
from sklearn.neighbors import KernelDensity
kde_grid = np.linspace(10,29,200)
deimos_stars = np.where(dei_class == 1)
deimos_gal = np.where(dei_class == 0)
deimos_kde_gal_norm = len(deimos_gal[0])/len(dei_class)
deimos_kde_star_norm = 1 - deimos_kde_gal_norm
kde_deimos = KernelDensity(bandwidth=1.059*np.std(kron_mag, ddof=1)*len(kron_mag)**(-0.2),
rtol=1E-4)
kde_deimos.fit(kron_mag[:, np.newaxis])
kde_deimos_stars = KernelDensity(bandwidth=1.059*np.std(kron_mag[deimos_stars], ddof=1)*len(kron_mag[deimos_stars])**(-0.2),
rtol=1E-4)
kde_deimos_stars.fit(kron_mag[deimos_stars[0], np.newaxis])
kde_deimos_gal = KernelDensity(bandwidth=1.059*np.std(kron_mag[deimos_gal], ddof=1)*len(kron_mag[deimos_gal])**(-0.2),
rtol=1E-4)
kde_deimos_gal.fit(kron_mag[deimos_gal[0], np.newaxis])
pdf_deimos = np.exp(kde_deimos.score_samples(kde_grid[:, np.newaxis]))
pdf_deimos_stars = np.exp(kde_deimos_stars.score_samples(kde_grid[:, np.newaxis]))
pdf_deimos_gal = np.exp(kde_deimos_gal.score_samples(kde_grid[:, np.newaxis]))
from matplotlib.ticker import MultipleLocator
#import seaborn as sns
color_dict = {'ml': "black"}
mag_bin_centers = mag_array + binwidth/2
#cmap_star = sns.cubehelix_palette(rot=0.5, light=0.7,dark=0.3,as_cmap=True)
#cmap_gal = sns.cubehelix_palette(start=0.3,rot=-0.5,light=0.7,dark=0.3,as_cmap=True)
fig, ax = plt.subplots(figsize=(8, 5))
ax.grid(linestyle='dotted', zorder=1)
ax.errorbar(mag_bin_centers, ml_acc_arr,
yerr=np.abs(ml_boot_scatt - ml_acc_arr),
ls='-', lw=.75, fmt='o',
color=color_dict['ml'], label="ML model",
linewidth=1.5, markersize=7.5, zorder=5)
# add KDE plots
ax.fill(kde_grid, pdf_deimos + 0.5, alpha=0.4, color="0.7", zorder=2)
ax.fill(kde_grid, pdf_deimos_gal*deimos_kde_gal_norm + 0.5, alpha=0.7, zorder=3)#, color=cmap_gal(0.25))
ax.fill(kde_grid, pdf_deimos_stars*deimos_kde_star_norm + 0.5, alpha=0.7, zorder=4)#, color=cmap_star(0.25))
ax.set_ylim(0.5,1.01)
ax.set_xlim(14, 27)
ax.tick_params(which="both", top=True, right=True, labelsize=15)
ax.set_xlabel('MAG_BEST', fontsize=15)
ax.set_ylabel('Accuracy', fontsize=15)
ax.yaxis.set_minor_locator(MultipleLocator(0.025))
ax.xaxis.set_major_locator(MultipleLocator(2))
ax.xaxis.set_minor_locator(MultipleLocator(0.5))
#ax.legend(bbox_to_anchor=(0.01, 0.3, 1., 0.102), loc=3, fontsize=13)
fig.subplots_adjust(top=0.98,right=0.98,left=0.1,bottom=0.12)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1 Least squares and linear basis functions models
Step2: Load the data
Step3: Test it here
Step5: 1.2 Least squares with a linear basis function model
Step8: Let us play with polynomial regression. Note that we will use your implemented function compute_mse. Please copy and paste your implementation from exercise02.
Step9: Run polynomial regression
Step11: 2 Evaluating model prediction performance
Step13: Then, test your split_data function below.
Step16: 3 Ridge Regression
|
<ASSISTANT_TASK:>
Python Code:
def least_squares(y, tx):
"""calculate the least squares solution; returns (mse, w)."""
a = tx.T.dot(tx)
b = tx.T.dot(y)
w = np.linalg.solve(a, b)  # numerically safer than inverting tx.T @ tx
return 1 / 2 * np.mean((y - tx.dot(w)) ** 2), w
from helpers import *
def test_your_least_squares():
height, weight, gender = load_data_from_ex02(sub_sample=False, add_outlier=False)
x, mean_x, std_x = standardize(height)
y, tx = build_model_data(x, weight)
err, w = least_squares(y, tx)  # least_squares returns (mse, w)
print(w)
print(err)
test_your_least_squares()
# load dataset
x, y = load_data()
print("shape of x {}".format(x.shape))
print("shape of y {}".format(y.shape))
def build_poly(x, degree):
"""polynomial basis functions for input data x, for j=0 up to j=degree."""
return (x.reshape(x.shape[0], 1) ** np.arange(degree + 1))
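# Illustrative usage: a degree-3 expansion of three points gives shape (3, 4),
# with columns x^0, x^1, x^2, x^3.
print(build_poly(np.array([0., 1., 2.]), 3))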
from plots import *
def calculate_mse(e):
Calculate the mse for vector e.
return 1/2*np.mean(e**2)
def polynomial_regression():
Constructing the polynomial basis function expansion of the data,
and then running least squares regression.
# define parameters
degrees = [1, 3, 7, 12]
# define the structure of the figure
num_row = 2
num_col = 2
f, axs = plt.subplots(num_row, num_col)
for ind, degree in enumerate(degrees):
tx = build_poly(x, degree)
err, weights = least_squares(y, tx)
rmse = np.sqrt(2 * err)
print("Processing {i}th experiment, degree={d}, rmse={loss}".format(
i=ind + 1, d=degree, loss=rmse))
# plot fit
plot_fitted_curve(
y, x, weights, degree, axs[ind // num_col][ind % num_col])
plt.tight_layout()
plt.savefig("visualize_polynomial_regression")
plt.show()
polynomial_regression()
def split_data(x, y, ratio, seed=1):
split the dataset based on the split ratio. If ratio is 0.8
you will have 80% of your data set dedicated to training
and the rest dedicated to testing
# set seed
np.random.seed(seed)
train_size = int(ratio * len(x))
shuffled_indices = np.random.permutation(len(x))
train_indices = shuffled_indices[:train_size]
test_indices = shuffled_indices[train_size:]
x_train = x[train_indices]
y_train = y[train_indices]
x_test = x[test_indices]
y_test = y[test_indices]
assert(x_train.shape[0] + x_test.shape[0] == x.shape[0])
assert(y_train.shape[0] + y_test.shape[0] == y.shape[0])
return x_train, y_train, x_test, y_test
def compute_rmse(y, tx, w):
e = y - tx @ w
return np.sqrt(2 * calculate_mse(e))
def train_test_split_demo(x, y, degree, ratio, seed):
polynomial regression with different split ratios and different degrees.
x_train, y_train, x_test, y_test = split_data(x, y, ratio, seed)
x_train = build_poly(x_train, degree)
x_test = build_poly(x_test, degree)
err, weight = least_squares(y_train, x_train)
rmse_tr = compute_rmse(y_train, x_train, weight)
rmse_te = compute_rmse(y_test, x_test, weight)
print("proportion={p}, degree={d}, Training RMSE={tr:.3f}, Testing RMSE={te:.3f}".format(
p=ratio, d=degree, tr=rmse_tr, te=rmse_te))
seed = 6
degrees = [1, 3, 7, 12]
split_ratios = [0.9, 0.5, 0.1]
for split_ratio in split_ratios:
for degree in degrees:
train_test_split_demo(x, y, degree, split_ratio, seed)
def ridge_regression(y, tx, lambda_):
implement ridge regression.
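# Closed-form ridge solution (note): w = (X^T X + lambda' I)^{-1} X^T y,
# where lambda' = 2 N lambda folds the (1/2N) MSE scaling into the penalty.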
aI = 2 * tx.shape[0] * lambda_ * np.identity(tx.shape[1])
a = tx.T.dot(tx) + aI
b = tx.T.dot(y)
return np.linalg.solve(a, b)
def ridge_regression_demo(x, y, degree, ratio, seed):
ridge regression demo.
# define parameter
lambdas = np.logspace(-5, 0, 15)
x_train, y_train, x_test, y_test = split_data(x, y, ratio, seed)
x_train = build_poly(x_train, degree)
x_test = build_poly(x_test, degree)
rmse_tr = []
rmse_te = []
for ind, lambda_ in enumerate(lambdas):
weights = ridge_regression(y_train, x_train, lambda_)
rmse_tr.append(compute_rmse(y_train, x_train, weights))
rmse_te.append(compute_rmse(y_test, x_test, weights))
print("proportion={p}, degree={d}, lambda={l:.3f}, Training RMSE={tr:.3f}, Testing RMSE={te:.3f}".format(
p=ratio, d=degree, l=lambda_, tr=rmse_tr[ind], te=rmse_te[ind]))
# Plot the obtained results
plot_train_test(rmse_tr, rmse_te, lambdas, degree)
seed = 56
degree = 7
split_ratio = 0.5
ridge_regression_demo(x, y, degree, split_ratio, seed)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 1) Visualize the Galaxy Image Data
Step2: Problem 1b
Step3: Problem 1c
Step4: Problem 2) Measuring Radial Intensities
Step5: Problem 2b
Step6: Problem 2c
Step7: For this and the remaining problems, assume that the galaxy is centered at (x, y) = (128, 138), that the ellipticity (1 - b/a) is fixed at 0.3, and that $\theta = 0.8377$. [In principle we could ask you to determine each of these values, but that is outside the scope of the current problem.]
Step8: Problem 2e
Step9: Problem 3) Sersic Profile
Step10: Problem 3b
Step11: Challenge Problem
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
%matplotlib notebook
gal_im = fits.getdata( # complete
# complete
# complete
plt.colorbar()
plt.tight_layout()
plt.hist( # complete
plt.yscale("log")
unusual_pix = # complete
gal_median_filt = # complete
# complete
# complete
plt.tight_layout()
def y_ellipse(x, h, k, a, b, theta):
# complete
# complete
# complete
# complete
# complete
# complete
return # complete
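# Math being solved (a sketch, not part of the graded solution): for a
# rotated ellipse centred at (h, k),
# ((x-h)cos(t) + (y-k)sin(t))^2/a^2 + ((y-k)cos(t) - (x-h)sin(t))^2/b^2 = 1,
# which is quadratic in y; its two roots trace the upper/lower boundary.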
x_grid = np.linspace(-3.605, 3.605, 1000)
y_vals = np.vstack((np.empty_like(x_grid), np.empty_like(x_grid)))
for x_num, x in enumerate(x_grid):
roots = y_ellipse( # complete
y_vals[:,x_num] = np.sort(roots)
plt.plot(np.append(x_grid, x_grid[::-1]), np.append(y_vals[0], y_vals[1][::-1]))
plt.xlim(-4, 4)
plt.ylim(-4, 4)
def mean_radial_flux(im_data, h, k, a, b, theta):
# complete
# complete
# complete
# complete
# complete
# complete
return # complete
a5_flux = # complete
a20_flux = # complete
print("The mean flux at 5 pixels is {:.6f}".format( # complete
print("The mean flux at 20 pixels is {:.6f}".format( # complete
r_grid = # complete
mean_rad_prof = # complete
for # complete
plt.plot( # complete
def squared_error( # complete
# complete
# complete
# complete
from scipy.optimize import minimize
res = minimize( # complete
# complete
# complete
print("The Sersic index is {:.3f}".format( # complete
# complete
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading and cleaning weather data
Step2: The summary above shows descriptive statistics for each of the numeric columns in the table. There is a good range of weather conditions in there, including hot and cold days, heavy rainfall, and windy spells.
Step3: The plot above shows the trends in minimum and maximum temperature during April and May 2016. The overall trend is an increase in both min and max temperatures, with a lot of day-to-day variation. For example, around 2nd May the maximum temperature was lower than the minimum temperature from a few days earlier!
Step4: Temperature pair plots
Step5: The pair plots show there's a reasonable correlation between the maximum and minimum temperatures.
Step6: The plot shows both the max and min pressure as being highly correlated. There may also be correlations between the pressure and other more directly observable factors such as temperature and wind.
Step7: The graph shows there was some serious rain in April and May. Alongside some dry spells through early April and May, there were also individual days when over an inch of rain fell. I'd definitely not be tempted to take a bike ride in those conditions!
Step8: This plot shows the majority of days had no rainfall at all. There were about 10 days with less than 0.5" of rain, and the count of days drops off steeply as the rainfall value increases. We may be able to transform this one-sided skewed distribution by setting a threshold, and converting to a boolean (above / below the threshold).
Step9: The graph shows a close correlation between the min_wind, max_wind, and max_gust speeds, as you'd expect. When building linear models, it's best to remove highly correlated features, so of the three we may keep just max_gust.
Step10: This pairplot shows a high positive correlation between the max_wind and max_gust, as you'd expect. There is also a strong correlation between the minimum and maximum wind speeds. When building models, we should probably keep only one of max_wind or max_gust to avoid multiple correlated columns.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import folium
import seaborn as sns
import datetime
from bcycle_lib.utils import *
%matplotlib inline
plt.rc('xtick', labelsize=14)
plt.rc('ytick', labelsize=14)
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
weather_df = load_weather()
weather_df.head(6)
weather_df.describe()
fig, ax = plt.subplots(1,1, figsize=(18,10))
ax = weather_df.plot(y=['max_temp', 'min_temp'], ax=ax)
ax.legend(fontsize=13)
xtick = pd.date_range( start=weather_df.index.min( ), end=weather_df.index.max( ), freq='D' )
ax.set_xticks( xtick )
# ax.set_xticklabels(weather_df.index.strftime('%a %b %d'))
ax.set_xlabel('Date', fontdict={'size' : 14})
ax.set_ylabel('Temperature (°F)', fontdict={'size' : 14})
ax.set_title('Austin Minimum and Maximum Temperatures during April and May 2016', fontdict={'size' : 16})
# fig.autofmt_xdate(rotation=90)
ttl = ax.title
ttl.set_position([.5, 1.02])
ax.legend(['Max Temp', 'Min Temp'], fontsize=14, loc=1)
ax.tick_params(axis='x', labelsize=14)
ax.tick_params(axis='y', labelsize=14)
fig, ax = plt.subplots(1,2, figsize=(12,6))
# ax[0] = weather_df['min_temp'].plot.hist(ax=ax[0]) # sns.distplot(weather_df['min_temp'], ax=ax[0])
# ax[1] = weather_df['max_temp'].plot.hist(ax=ax[1]) # sns.distplot(weather_df['max_temp'], ax=ax[1])
ax[0] = sns.distplot(weather_df['min_temp'], ax=ax[0])
ax[1] = sns.distplot(weather_df['max_temp'], ax=ax[1])
for axis in ax:
axis.set_xlabel('Temperature (°F)', fontdict={'size' : 14})
axis.set_ylabel('Density', fontdict={'size' : 14})
ax[0].set_title('Minimum Temperature Distribution', fontdict={'size' : 16})
ax[1].set_title('Maximum Temperature Distribution', fontdict={'size' : 16})
g = sns.pairplot(data=weather_df[['min_temp', 'max_temp']], kind='reg',size=4)
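# Quantify the relationship seen in the pair plot (a quick sketch using the
# same weather_df columns).
print(weather_df[['min_temp', 'max_temp']].corr())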
fig, ax = plt.subplots(1,1, figsize=(18,10))
ax = weather_df.plot(y=['max_pressure', 'min_pressure'], ax=ax)
ax.legend(fontsize=13)
xtick = pd.date_range( start=weather_df.index.min( ), end=weather_df.index.max( ), freq='D' )
ax.set_xticks( xtick )
# ax.set_xticklabels(weather_df.index.strftime('%a %b %d'))
ax.set_xlabel('Date', fontdict={'size' : 14})
ax.set_ylabel('Pressure (inches)', fontdict={'size' : 14})
ax.set_title('Min and Max Pressure', fontdict={'size' : 18})
# fig.autofmt_xdate(rotation=90)
ax.tick_params(axis='x', labelsize=14)
ax.tick_params(axis='y', labelsize=14)
fig, ax = plt.subplots(1,1, figsize=(18,10))
ax = weather_df['precipitation'].plot.bar(ax=ax, legend=None)
ax.set_xticklabels(weather_df.index.strftime('%a %b %d'))
ax.set_xlabel('', fontdict={'size' : 14})
ax.set_ylabel('Precipitation (inches)', fontdict={'size' : 14})
ax.set_title('Austin Precipitation in April and May 2016', fontdict={'size' : 16})
ax.tick_params(axis='x', labelsize=13)
ax.tick_params(axis='y', labelsize=14)
ttl = ax.title
ttl.set_position([.5, 1.02])
fig, ax = plt.subplots(1,1, figsize=(6,6))
ax = weather_df['precipitation'].plot.hist(ax=ax)
ax.set_xlabel('Precipitation (inches)', fontdict={'size' : 14})
ax.set_ylabel('Count', fontdict={'size' : 14})
ax.set_title('Precipitation distribution', fontdict={'size' : 16})
fig, ax = plt.subplots(1,1, figsize=(18,10))
ax = weather_df.plot(y=['max_wind', 'min_wind', 'max_gust'], ax=ax)
ax.legend(fontsize=13)
xtick = pd.date_range( start=weather_df.index.min( ), end=weather_df.index.max( ), freq='D' )
ax.set_xticks( xtick )
# ax.set_xticklabels(weather_df.index.strftime('%a %b %d'))
ax.set_xlabel('Date', fontdict={'size' : 14})
ax.set_ylabel('Wind speed (MPH)', fontdict={'size' : 14})
ax.set_title('Wind speeds', fontdict={'size' : 18})
# fig.autofmt_xdate(rotation=90)
ax.tick_params(axis='x', labelsize=14)
ax.tick_params(axis='y', labelsize=14)
g = sns.pairplot(data=weather_df[['min_wind', 'max_wind', 'max_gust']], kind='reg',size=3.5)
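# Numeric check of the wind correlations (a quick sketch).
print(weather_df[['min_wind', 'max_wind', 'max_gust']].corr())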
# weather_df[['thunderstorm', 'rain', 'fog']].plot.bar(figsize=(20,20))
heatmap_df = weather_df.copy()
heatmap_df = heatmap_df[['thunderstorm', 'rain', 'fog']]
heatmap_df = heatmap_df.reset_index()
heatmap_df['day'] = heatmap_df['date'].dt.dayofweek
heatmap_df['week'] = heatmap_df['date'].dt.week
heatmap_df = heatmap_df.pivot_table(values='thunderstorm', index='day', columns='week')
heatmap_df = heatmap_df.fillna(False)
# ['day'] = heatmap_df.index.dt.dayofweek
# Restore proper day and week-of-month labels.
heatmap_df.index = ['Mon', 'Tues', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun']
weeks = heatmap_df.columns
weeks = ['2016-W' + str(week) for week in weeks] # Convert to '2016-Wxx'
weeks = [datetime.datetime.strptime(d + '-0', "%Y-W%W-%w").strftime('%b %d') for d in weeks]
heatmap_df.columns = weeks
fig, ax = plt.subplots(1,1, figsize=(8, 6))
sns.heatmap(data=heatmap_df, square=True, cmap='Blues', linewidth=2, cbar=False, linecolor='white', ax=ax)
ax.set_title('Thunderstorms by day and week', fontdict={'size' : 18})
ttl = ax.title
ttl.set_position([.5, 1.05])
ax.set_xlabel('Week ending (Sunday)', fontdict={'size' : 14})
ax.set_ylabel('')
ax.tick_params(axis='x', labelsize=13)
ax.tick_params(axis='y', labelsize=13)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download the output and write it to a file on your local machine
Step2: Unpack the file
Step3: Change to the directory containing the unpacked contents and view
Step4: After browsing the hydrotrend directory, read the time series data from the model output
Step5: View basic information about the data
Step6: Next, read the discharge data
Step7: View basic information about the data
Step8: Prepare the data by removing the two header lines and the empty line at the end of the discharge series. Also remove the last two values in the time series. Both series now have 365 elements.
Step9: Now we can visualize the model output as a time series
|
<ASSISTANT_TASK:>
Python Code:
run_id = 'addbbe35-5fd7-4d1b-a37d-3629768532b4'
download_file = run_id + '.tar.gz'
download_url = 'http://csdms.colorado.edu/pub/users/wmt/' + download_file
import requests
r = requests.get(download_url)
with open(download_file, 'wb') as fp:
fp.write(r.content)
import tarfile
tar = tarfile.open(download_file)
tar.extractall()
tar.close()
import os
os.chdir(run_id)
%ls
with open("hydrotrend/_time.txt", "r") as fp:
time = fp.read().split("\n")
print "Data type: {}".format(type(time))
print "Length: {} elements".format(len(time))
print "First five elements:\n{}".format(time[0:5])
with open("hydrotrend/HYDRO_OUTPUT/HYDROASCII.Q", "r") as fp:
discharge = fp.read().split("\n")
print "Data type: {}".format(type(discharge))
print "Length: {} elements".format(len(discharge))
print "First five elements:\n{}".format(discharge[0:5])
discharge = discharge[2:-1]
time = time[:-2]
print len(discharge), len(time)
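# Convert the strings read from file to floats before plotting (a sketch;
# assumes each remaining line holds a single numeric value).
time = [float(t) for t in time]
discharge = [float(q) for q in discharge]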
%pylab inline
plot(time, discharge)
title("HydroTrend: discharge at river mouth versus time")
xlabel("Time [$days$]")
ylabel("Q [$m^3 s^{-1}$]")
show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Alternatively, the best partition into communities can be inferred, for example
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import networkx as nx
from netgraph import Graph
# create a modular graph
partition_sizes = [10, 20, 30, 40]
g = nx.random_partition_graph(partition_sizes, 0.5, 0.1)
# create a dictionary that maps nodes to the community they belong to
node_to_community = dict()
node = 0
for community_id, size in enumerate(partition_sizes):
for _ in range(size):
node_to_community[node] = community_id
node += 1
community_to_color = {
0 : 'tab:blue',
1 : 'tab:orange',
2 : 'tab:green',
3 : 'tab:red',
}
node_color = {node: community_to_color[community_id] for node, community_id in node_to_community.items()}
Graph(g,
node_color=node_color, node_edge_width=0, edge_alpha=0.1,
node_layout='community', node_layout_kwargs=dict(node_to_community=node_to_community),
edge_layout='bundled', edge_layout_kwargs=dict(k=2000),
)
plt.show()
from community import community_louvain
node_to_community = community_louvain.best_partition(g)
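# Sketch: map the inferred community ids back onto colors (assumption:
# reuse the palette above, wrapping if Louvain finds more than four groups).
node_color = {node: community_to_color[cid % len(community_to_color)] for node, cid in node_to_community.items()}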
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Class for Creating Training and Testing Samples
Step2: Section 2. Linear Regression
Step3: Only categorical
Step4: <h3 id='2_4'>2.4 What are the coefficients (or weights) of the features in your linear regression model?</h3>
Step5: <h3 id='2_5'>2.5 What is your model’s $R^{2}$ (coefficient of determination) value?</h3>
Step6: As can be seen from the above, somewhat arbitrarily-selected, values, the number of close predictions is a little over $50\%$ when close is defined as a prediction with a difference that is less than $1$ from the actual observed value. Given that the value of entries can take on such a large range of values $[0, 32814]$, differences less than $100$ and $1000$ are shown as well.
Step7: Since the above predictions show a discernible, linear, and increasing pattern (and, thus, are not stochastic), it seems apparent that there is in fact not a linear relationship between the explanatory and response variables. Thus, a linear model is not appropriate for the current data set.
Step8: Gradient Descent
Step9: regression makes little sense here
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import scipy as sp
import scipy.stats as st
import statsmodels.api as sm
import scipy.optimize as op
import matplotlib.pyplot as plt
%matplotlib inline
filename = '/Users/excalibur/py/nanodegree/intro_ds/final_project/improved-dataset/turnstile_weather_v2.csv'
# import data
data = pd.read_csv(filename)
print data.columns.values
data['ENTRIESn_hourly'].describe()
plt.boxplot(data['ENTRIESn_hourly'], vert=False)
plt.show()
data[data['ENTRIESn_hourly'] == 0].count()[0]
data[data['ENTRIESn_hourly'] > 500].count()[0]
data[data['ENTRIESn_hourly'] > 1000].count()[0]
data[data['ENTRIESn_hourly'] > 5000].count()[0]
data[data['ENTRIESn_hourly'] > 10000].count()[0]
plt.figure(figsize = (10,10))
plt.hist(data['ENTRIESn_hourly'], bins=100)
plt.show()
plt.boxplot(data['ENTRIESn_hourly'], vert=False)
plt.show()
# the overwhelming majority of the action is occurring below 10000
#data = data[(data['ENTRIESn_hourly'] <= 10000)]
plt.figure(figsize = (10,10))
plt.hist(data['ENTRIESn_hourly'].values, bins=100)
plt.show()
plt.boxplot(data['ENTRIESn_hourly'].values, vert=False)
plt.show()
class SampleCreator:
def __init__(self,data,categorical_features,quantitative_features):
m = data.shape[0]
random_indices = np.random.choice(np.arange(0,m), size=m, replace=False)
split = int(m - m*0.10) # leave about 10% of data for testing
train_indices = random_indices[0:split]
test_indices = random_indices[split:]
# check disjointedness of training and testing indices
for i in train_indices:
if i in test_indices:
print "<!> Training and Testing Sample Overlap <!>"
# response vector
y = data['ENTRIESn_hourly'].values
# get quantitative features
X = data[quantitative_features].values
# Feature Scaling
# mean normalization
x_i_bar = []
s_i = []
for i in np.arange(X.shape[1]):
x_i_bar.append(np.mean(X[:,i]))
s_i.append(np.std(X[:,i]))
X[:,i] = np.true_divide((np.subtract(X[:,i],x_i_bar[i])),s_i[i])
# create dummy variables for categorical features
for feature in categorical_features:
dummies = sm.categorical(data[feature].values, drop=True)
X = np.hstack((X,dummies))
# final design matrix
X = sm.add_constant(X)
# training samples
self.y_train = y[train_indices]
self.X_train = X[train_indices]
# testing samples
self.y_test = y[test_indices]
self.X_test = X[test_indices]
#categorical_features = ['UNIT', 'hour', 'day_week', 'station']
categorical_features = ['UNIT']
#quantitative_features = ['latitude', 'longitude', 'rain']
quantitative_features = []
# for tracking during trials
best_rsquared = 0
best_results = []
# perform 5 trials; keep model with best R^2
for x in xrange(0,5):
samples = SampleCreator(data,categorical_features,quantitative_features)
model = sm.OLS(samples.y_train,samples.X_train)
results = model.fit()
if results.rsquared > best_rsquared:
best_rsquared = results.rsquared
best_results = results
print "r = {0:.2f}".format(np.sqrt(best_results.rsquared))
print "R^2 = {0:.2f}".format(best_results.rsquared)
X_train = samples.X_train
print X_train.shape
y_train = samples.y_train
print y_train.shape
y_train.shape = (y_train.shape[0],1)
print y_train.shape
X_test = samples.X_test
print X_test.shape
y_test = samples.y_test
print y_test.shape
y_test.shape = (y_test.shape[0],1)
print y_test.shape
ols_y_hat = results.predict(X_test)
ols_y_hat.shape = (ols_y_hat.shape[0],1)
plt.title('Observed Values vs Fitted Predictions')
plt.xlabel('observed values')
plt.ylabel('predictions')
plt.scatter(y_test, ols_y_hat, alpha=0.7, color='green', edgecolors='black')
plt.show()
print best_results.params
ols_residuals = (ols_y_hat - y_test)
ols_residuals.shape
plt.boxplot(ols_residuals, vert=False)
plt.title('Boxplot of Residuals')
plt.xlabel('residuals')
plt.show()
plt.scatter(ols_y_hat,ols_residuals, alpha=0.7, color='purple', edgecolors='black')
plt.title('RESIDUAL PLOT')
plt.plot([np.min(ols_y_hat),np.max(ols_y_hat)], [0, 0], color='red')
plt.xlabel('predictions')
plt.ylabel('residuals')
plt.show()
plt.hist(y_test, color='purple', alpha=0.7, label='observations')
plt.hist(ols_y_hat, color='green', alpha=0.5, bins=6, label='ols predictions')
plt.title('OBSERVATIONS vs OLS PREDICTIONS')
plt.ylabel('frequency')
plt.legend()
plt.show()
plt.hist(ols_residuals, color='gray', alpha=0.7)
plt.title('OLS RESIDUALS')
plt.ylabel('frequency')
plt.show()
best_results.summary()
#gradient descent, number of iterations
#iterations = 100
iterations = 300
# learning rates
#alpha = [-0.3, -0.1, -0.03, -0.01, -0.003, -0.001, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3]
#alpha = [0.001] # before removing large values
alpha = [0.01]
# number of examples
m = X_train.shape[0]
print "m = {0}".format(m)
# number of features
n = X_train.shape[1]
print "n = {0}".format(n)
# theta parameters
theta = np.zeros(((n,1)))
# vectorized cost function
def J(X,y):
m = X.shape[0]
return (1.0/(2*m)) * (((X.dot(theta)) - y).T).dot(X.dot(theta) - y)
# vectorized delta function
def delta(X,y):
return X.T.dot((X.dot(theta)) - y)
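# Note: the usual (1/m) factor of the least-squares gradient is omitted in
# delta and is effectively absorbed into the learning rate alpha.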
# gradient descent, test multiple alphas
for a in np.arange(0,len(alpha)):
# reset theta
theta = np.zeros(((n),1))
# reset vector J_values, store cost function values for plotting
J_values = np.zeros((iterations,1))
# minibatch process
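# Note: the full iteration budget runs on each 100-row minibatch in turn,
# rather than cycling through all minibatches every epoch, so theta is
# updated sequentially one minibatch at a time.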
for i in np.arange(0,m,100):
for iteration in xrange(0,iterations):
X = X_train[i:i+100]
y = y_train[i:i+100]
theta = theta - (alpha[a] * delta(X,y))
J_values[iteration] = J(X,y)
# visualize the cost function (2-D)
cost_x = np.arange(iterations)
cost_x.shape = (iterations,1)
plt.plot(cost_x,J_values)
plt.title("Learning Rate: " + str(alpha[a]))
plt.xlabel('iterations')
plt.ylabel(r"$J(\theta)$")
plt.show()
print "Parameters:\n{0}\n...".format(theta[0:5])
grad_desc_y_hat = X_test.dot(theta)
print grad_desc_y_hat.shape
plt.title('Observed Values vs Fitted Predictions')
plt.xlabel('observed values')
plt.ylabel('predictions')
plt.scatter(y_test, grad_desc_y_hat, alpha=0.7, color='green', edgecolors='black')
plt.show()
gd_residuals = (grad_desc_y_hat - y_test)
gd_residuals.shape
plt.boxplot(gd_residuals, vert=False)
plt.title('Boxplot of Residuals')
plt.xlabel('residuals')
plt.show()
plt.scatter(grad_desc_y_hat,gd_residuals, alpha=0.7, color='purple', edgecolors='black')
plt.title('RESIDUAL PLOT')
plt.plot([np.min(grad_desc_y_hat),np.max(grad_desc_y_hat)], [0, 0], color='red')
plt.xlabel('predictions')
plt.ylabel('residuals')
plt.show()
gd_rounded_yhat = np.round(grad_desc_y_hat)
for i in np.arange(y_test.shape[0]):
if gd_rounded_yhat[i] == y_test[i]:
print gd_rounded_yhat[i]
plt.hist(y_test, color='purple', alpha=0.7, label='observations')
plt.hist(grad_desc_y_hat, color='green', alpha=0.5, label='gd predictions')
plt.title('OBSERVATIONS vs GD PREDICTIONS')
plt.ylabel('frequency')
plt.show()
plt.hist(gd_residuals, color='gray', alpha=0.7)
plt.title('GD RESIDUALS')
plt.ylabel('frequency')
plt.show()
def grad_desc_score():
# fraction of predictions within 1000 entries of the observed value
within_1000 = 0
for i in range(len(y_test)):
if np.absolute(y_test[i] - grad_desc_y_hat[i]) <= 1000:
within_1000 += 1
return within_1000*1.0/len(y_test)
X_test[0:5]
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators = 100, n_jobs=-1) # 10, 30, 100, 300
clf = clf.fit(X_train,np.ravel(y_train))
units = data['UNIT'].values
entries = data['ENTRIESn_hourly'].values
units = np.array([int(u.replace('R','')) for u in units])
units.shape = (units.shape[0],1)
clf = RandomForestClassifier(n_estimators = 10, n_jobs=-1) # 10, 30, 100, 300
clf = clf.fit(units,entries)
pred = clf.predict(units)
clf.score(units,entries)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Change the following variables according to your definitions.
Step2: Change the following variables ONLY if necessary.
Step3: Initialize Vertex AI SDK
Step4: Submit a Vertex custom training job
Step5: Configure a custom training job
Step6: To train the model with HugeCTR, you must inform the cardinalities of the categorical features.
Step7: Set training node configuration
Step8: Configure worker pool specifications
Step9: Submit and monitor the job
Step10: Submit and monitor a Vertex hyperparameter tuning job
Step11: Submit and monitor the job
Step12: Retrieve trial results
Step13: Find the best trial
|
<ASSISTANT_TASK:>
Python Code:
import json
import os
import time
from google.cloud import aiplatform as vertex_ai
from google.cloud.aiplatform import hyperparameter_tuning as hpt
# Project definitions
PROJECT_ID = '<YOUR PROJECT ID>' # Change to your project id.
REGION = '<LOCATION OF RESOURCES>' # Change to your region.
# Service Account address
VERTEX_SA = f'vertex-sa@{PROJECT_ID}.iam.gserviceaccount.com' # Change to your service account with Vertex AI Admin permissions.
# Bucket definitions
BUCKET = '<YOUR BUCKET NAME>' # Change to your bucket.
# Dataset information for training / validation / schema. The path must point to the file _gcs_file_list.txt
# The path MUST start with `/gcs/<bucket_name>/...` to be used as a GCSFuse path.
# The following examples is a path to a bucket create by the previous notebook (preprocessing with NVTabular)
# Please change to your path.
TRAIN_DATA = '/gcs/merlin-on-gcp/nvt-preprocessing-v01-2205/nvt-csv-pipeline/375468928805/nvt-csv-pipeline-20220603050219/transform-dataset-op_-2488396574040784896/transformed_dataset/_gcs_file_list.txt'
VALID_DATA = '/gcs/merlin-on-gcp/nvt-preprocessing-v01-2205/nvt-csv-pipeline/375468928805/nvt-csv-pipeline-20220603050219/transform-dataset-op-2_4429132453600296960/transformed_dataset/_gcs_file_list.txt'
# Schema used by the training pipepine
# The path must point to the file schema.pbtxt
SCHEMA_PATH = '/gcs/merlin-on-gcp/nvt-preprocessing-v01-2205/nvt-csv-pipeline/375468928805/nvt-csv-pipeline-20220603050219/transform-dataset-op_-2488396574040784896/transformed_dataset/schema.pbtxt'
# Bucket definitions
VERSION = 'v01'
MODEL_NAME = 'deepfm'
MODEL_DISPLAY_NAME = f'hugectr-{MODEL_NAME}-{VERSION}'
WORKSPACE = f'gs://{BUCKET}/{MODEL_DISPLAY_NAME}'
# Docker definitions for training
IMAGE_NAME = 'hugectr-training'
IMAGE_URI = f'gcr.io/{PROJECT_ID}/{IMAGE_NAME}'
DOCKERNAME = 'hugectr'
vertex_ai.init(
project=PROJECT_ID,
location=REGION,
staging_bucket=os.path.join(WORKSPACE, 'stg')
)
FILE_LOCATION = './src'
! gcloud builds submit --config src/cloudbuild.yaml --substitutions _DOCKERNAME=$DOCKERNAME,_IMAGE_URI=$IMAGE_URI,_FILE_LOCATION=$FILE_LOCATION --timeout=2h --machine-type=e2-highcpu-8
# Training parameters
NUM_EPOCHS = 0
MAX_ITERATIONS = 25000
EVAL_INTERVAL = 1000
EVAL_BATCHES = 500
EVAL_BATCHES_FINAL = 2500
DISPLAY_INTERVAL = 200
SNAPSHOT_INTERVAL = 0
PER_GPU_BATCH_SIZE = 2048
LR = 0.001
DROPOUT_RATE = 0.5
NUM_WORKERS = 12
cardinalities = [
9999999,
39061,
17296,
7425,
20266,
4,
7123,
1544,
64,
9999999,
3067956,
405283,
11,
2209,
11939,
155,
4,
977,
15,
9999999,
9999999,
9999999,
590152,
12974,
109,
37
]
cardinalities = ' '.join([str(c) for c in cardinalities])
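# Sanity check (a sketch): the training task expects one cardinality per
# categorical feature.
print("Number of categorical features: {}".format(len(cardinalities.split())))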
MACHINE_TYPE = 'a2-highgpu-1g'
ACCELERATOR_TYPE = 'NVIDIA_TESLA_A100'
ACCELERATOR_NUM = 1
gpus = json.dumps([list(range(ACCELERATOR_NUM))]).replace(' ','')
worker_pool_specs = [
{
"machine_spec": {
"machine_type": MACHINE_TYPE,
"accelerator_type": ACCELERATOR_TYPE,
"accelerator_count": ACCELERATOR_NUM,
},
"replica_count": 1,
"container_spec": {
"image_uri": IMAGE_URI,
"command": ["python", "-m", "task"],
"args": [
f'--per_gpu_batch_size={PER_GPU_BATCH_SIZE}',
f'--model_name={MODEL_NAME}',
f'--train_data={TRAIN_DATA}',
f'--valid_data={VALID_DATA}',
f'--schema={SCHEMA_PATH}',
f'--slot_size_array={cardinalities}',
f'--max_iter={MAX_ITERATIONS}',
f'--max_eval_batches={EVAL_BATCHES}',
f'--eval_batches={EVAL_BATCHES_FINAL}',
f'--dropout_rate={DROPOUT_RATE}',
f'--lr={LR}',
f'--num_workers={NUM_WORKERS}',
f'--num_epochs={NUM_EPOCHS}',
f'--eval_interval={EVAL_INTERVAL}',
f'--snapshot={SNAPSHOT_INTERVAL}',
f'--display_interval={DISPLAY_INTERVAL}',
f'--gpus={gpus}',
],
},
}
]
job_name = 'hugectr_{}'.format(time.strftime("%Y%m%d_%H%M%S"))
base_output_dir = os.path.join(WORKSPACE, job_name)
job = vertex_ai.CustomJob(
display_name=job_name,
worker_pool_specs=worker_pool_specs,
base_output_dir=base_output_dir
)
job.run(
sync=True,
service_account=VERTEX_SA,
restart_job_on_worker_restart=False
)
metric_spec = {'AUC': 'maximize'}
parameter_spec = {
'lr': hpt.DoubleParameterSpec(min=0.001, max=0.01, scale='log'),
'dropout_rate': hpt.DiscreteParameterSpec(values=[0.4, 0.5, 0.6], scale=None),
}
job_name = 'HUGECTR_HTUNING_{}'.format(time.strftime("%Y%m%d_%H%M%S"))
base_output_dir = os.path.join(WORKSPACE, "model_training", job_name)
custom_job = vertex_ai.CustomJob(
display_name=job_name,
worker_pool_specs=worker_pool_specs,
base_output_dir=base_output_dir
)
hp_job = vertex_ai.HyperparameterTuningJob(
display_name=job_name,
custom_job=custom_job,
metric_spec=metric_spec,
parameter_spec=parameter_spec,
max_trial_count=4,
parallel_trial_count=2,
search_algorithm=None)
hp_job.run(
sync=True,
service_account=VERTEX_SA,
restart_job_on_worker_restart=False
)
hp_job.trials
best_trial = sorted(hp_job.trials,
key=lambda trial: trial.final_measurement.metrics[0].value,
reverse=True)[0]
print("Best trial ID:", best_trial.id)
print(" AUC:", best_trial.final_measurement.metrics[0].value)
print(" LR:", best_trial.parameters[1].value)
print(" Dropout rate:", best_trial.parameters[0].value)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set the scope for the Extract-Transform
Step2: Create a locally cached datastore
Step3: EIA-860
Step4: Transform EIA-860 / EIA-860m
Step5: EIA-923
Step6: Transform just the EIA-923
Step7: Combined EIA Data
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import pudl
import logging
import sys
from pathlib import Path
import pandas as pd
pd.options.display.max_columns = None
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.handlers = [handler]
pudl_settings = pudl.workspace.setup.get_defaults()
from pudl.settings import Eia860Settings, Eia923Settings, EiaSettings
from pudl.metadata.classes import DataSource
eia860_data_source = DataSource.from_id("eia860")
eia860_years = eia860_data_source.working_partitions["years"]
#eia860_years = [2020]
eia860_settings = Eia860Settings(years=eia860_years)
# Uncomment the single-year line below to restrict the years used:
eia923_data_source = DataSource.from_id("eia923")
eia923_years = eia923_data_source.working_partitions["years"]
#eia923_years = [2020]
eia923_settings = Eia923Settings(years=eia923_years)
eia_settings = EiaSettings(eia860=eia860_settings, eia923=eia923_settings)
ds_kwargs = {"local_cache_path": pudl_settings["data_dir"]}
ds = pudl.workspace.datastore.Datastore(**ds_kwargs)
%%time
eia860_extractor = pudl.extract.eia860.Extractor(ds)
eia860_raw_dfs = eia860_extractor.extract(year=eia860_settings.years)
if eia860_settings.eia860m:
eia860m_raw_dfs = pudl.extract.eia860m.Extractor(ds).extract(
year_month=eia860_settings.eia860m_date
)
eia860_raw_dfs = pudl.extract.eia860m.append_eia860m(
eia860_raw_dfs=eia860_raw_dfs,
eia860m_raw_dfs=eia860m_raw_dfs
)
%%time
eia860_transformed_dfs = pudl.transform.eia860.transform(
eia860_raw_dfs,
eia860_tables=eia860_settings.tables,
)
%%time
eia923_extractor = pudl.extract.eia923.Extractor(ds)
eia923_raw_dfs = eia923_extractor.extract(year=eia923_settings.years)
%%time
eia923_transformed_dfs = pudl.transform.eia923.transform(
eia923_raw_dfs,
eia923_tables=eia923_settings.tables,
)
%%time
eia_transformed_dfs = eia923_transformed_dfs.copy()
eia_transformed_dfs.update(eia860_transformed_dfs.copy())
# Do some final cleanup and assign appropriate types:
eia_transformed_dfs = {
name: pudl.helpers.convert_cols_dtypes(df, data_source="eia")
for name, df in eia_transformed_dfs.items()
}
entities_dfs, eia_transformed_dfs = pudl.transform.eia.transform(
eia_transformed_dfs,
eia860_years=eia860_settings.years,
eia923_years=eia923_settings.years,
eia860m=eia860_settings.eia860m,
)
# Assign appropriate types to new entity tables:
entities_dfs = {
name: pudl.helpers.apply_pudl_dtypes(df, group="eia")
for name, df in entities_dfs.items()
}
for table in entities_dfs:
entities_dfs[table] = (
pudl.metadata.classes.Package.from_resource_ids()
.get_resource(table)
.encode(entities_dfs[table])
)
out_dfs = pudl.etl._read_static_tables_eia()
out_dfs.update(entities_dfs)
out_dfs.update(eia_transformed_dfs)
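# Quick summary of the combined EIA outputs (a sketch).
print({name: len(df) for name, df in out_dfs.items()})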
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Observation - from the report contents page, I can navigate via the Back button to https
Step2: Report Contents Page Link Scraper
Step3: Report - Page Scraper
Step4: Report - Oral Session Page Scraper
Step5: Report - Written Evidence Scraper
Step6: Scraping the Government Response
|
<ASSISTANT_TASK:>
Python Code:
url='https://publications.parliament.uk/pa/cm201516/cmselect/cmwomeq/584/58402.htm'
import pandas as pd
import requests
import requests_cache
requests_cache.install_cache('parli_comm_cache')
from bs4 import BeautifulSoup
#https://www.dataquest.io/blog/web-scraping-tutorial-python/
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
#What does a ToC item look like?
soup.select('p[class*="ToC"]')[5].find('a')
url_written=None
url_witnesses=None
for p in soup.select('p[class*="ToC"]'):
#witnesses
if 'Witnesses' in p.find('a'):
url_witnesses=p.find('a')['href']
#written evidence
if 'Published written evidence' in p.find('a'):
url_written=p.find('a')['href']
url_written, url_witnesses
#https://stackoverflow.com/a/34661518/454773
pages=[]
for EachPart in soup.select('p[class*="ToC"]'):
href=EachPart.find('a')['href']
#Fudge to collect URLs of pages asssociated with report content
if '#_' in href:
pages.append(EachPart.find('a')['href'].split('#')[0])
pages=list(set(pages))
pages
#We need to get the relative path for the page...
import os.path
stub=os.path.split(url)
stub
#Grab all the pages in the report
for p in pages:
r=requests.get('{}/{}'.format(stub[0],p))
pagesoup=BeautifulSoup(r.content, 'html.parser')
print(str(pagesoup.select('div[id="shellcontent"]')[0])[:2000])
import re
def evidenceRef(pagesoup):
qs=[]
ws=[]
#Grab list of questions
for p in pagesoup.select('div[class="_idFootnote"]'):
#Find oral question numbers
q=re.search(r'^.*\s+(Q[0-9]*)\s*$', p.find('p').text)
if q:
qs.append(q.group(1))
#Find links to written evidence
links=p.find('p').findAll('a')
if len(links)>1:
if links[1]['href'].startswith('http://data.parliament.uk/WrittenEvidence/CommitteeEvidence.svc/EvidenceDocument/'):
ws.append(links[1].text.strip('()'))
return qs, ws
evidenceRef(pagesoup)
qs=[]
ws=[]
for p in pages:
r=requests.get('{}/{}'.format(stub[0],p))
pagesoup=BeautifulSoup(r.content, 'html.parser')
pagesoup.select('div[id="shellcontent"]')[0]
qstmp,wstmp= evidenceRef(pagesoup)
qs += qstmp
ws +=wstmp
pd.DataFrame(qs)[0].value_counts().head()
pd.DataFrame(ws)[0].value_counts().head()
#url='https://publications.parliament.uk/pa/cm201516/cmselect/cmwomeq/584/58414.htm'
if url_witnesses is not None:
r=requests.get('{}/{}'.format(stub[0],url_witnesses))
pagesoup=BeautifulSoup(r.content, 'html.parser')
l1=[t.text.split('\t')[0] for t in pagesoup.select('h2[class="WitnessHeading"]')]
l2=pagesoup.select('table')
pd.DataFrame({'a':l1,'b':l2})
#Just as easy to do this by hand
items=[]
items.append(['Tuesday 15 December 2015','Chris Giles', 'Economics Editor', 'The Financial Times','Q1', 'Q35'])
items.append(['Tuesday 15 December 2015','Dr Alison Parken', 'Women Adding Value to the Economy (WAVE)', 'Cardiff University','Q1', 'Q35'])
items.append(['Tuesday 15 December 2015','Professor Jill Rubery','', 'Manchester University','Q1', 'Q35'])
items.append(['Tuesday 15 December 2015','Sheila Wild', 'Founder', 'Equal Pay Portal','Q1', 'Q35'])
items.append(['Tuesday 15 December 2015','Professor the Baroness Wolf of Dulwich', "King's College", 'London','Q1', 'Q35'])
items.append(['Tuesday 15 December 2015','Neil Carberry', 'Director for Employment and Skills', 'CBI','Q36','Q58'])
items.append(['Tuesday 15 December 2015','Ann Francke', 'Chief Executive', 'Chartered Management Institute','Q36','Q58'])
items.append(['Tuesday 15 December 2015','Monika Queisser',' Senior Counsellor and Head of Social Policy', 'Organisation for Economic Cooperation and Development','Q36','Q58'])
items.append(['Tuesday 12 January 2016','Amanda Brown', 'Assistant General Secretary', 'NUT','Q59','Q99'])
items.append(['Tuesday 12 January 2016','Dr Sally Davies', 'President', "Medical Women's Federation",'Q59','Q99'])
items.append(['Tuesday 12 January 2016','Amanda Fone','Chief Executive Officer', 'F1 Recruitment and Search','Q59','Q99'])
items.append(['Tuesday 12 January 2016','Audrey Williams', 'Employment Lawyer and Partner',' Fox Williams','Q59','Q99'])
items.append(['Tuesday 12 January 2016','Anna Ritchie Allan', 'Project Manager', 'Close the Gap','Q100','Q136'])
items.append(['Tuesday 12 January 2016','Christopher Brooks', 'Policy Adviser', 'Age UK','Q100','Q136'])
items.append(['Tuesday 12 January 2016','Scarlet Harris', 'Head of Gender Equality', 'TUC','Q100','Q136'])
items.append(['Tuesday 12 January 2016','Mr Robert Stephenson-Padron', 'Managing Director', 'Penrose Care','Q100','Q136'])
items.append(['Tuesday 19 January 2016','Sarah Jackson', 'Chief Executive', 'Working Families','Q137','Q164'])
items.append(['Tuesday 19 January 2016','Adrienne Burgess', 'Joint Chief Executive and Head of Research', 'Fatherhood Institute','Q137','Q164'])
items.append(['Tuesday 19 January 2016','Maggie Stilwell', 'Partner', 'Ernst & Young LLP','Q137','Q164'])
items.append(['Tuesday 26 January 2016','Michael Newman', 'Vice-Chair', 'Discrimination Law Association','Q165','Q191'])
items.append(['Tuesday 26 January 2016','Duncan Brown', '','Institute for Employment Studies','Q165','Q191'])
items.append(['Tuesday 26 January 2016','Tim Thomas', 'Head of Employment and Skills', "EEF, the manufacturers' association",'Q165','Q191'])
items.append(['Tuesday 26 January 2016','Helen Fairfoul', 'Chief Executive', 'Universities and Colleges Employers Association','Q192','Q223'])
items.append(['Tuesday 26 January 2016','Emma Stewart', 'Joint Chief Executive Officer', 'Timewise Foundation','Q192','Q223'])
items.append(['Tuesday 26 January 2016','Claire Turner','', 'Joseph Rowntree Foundation','Q192','Q223'])
items.append(['Wednesday 10 February 2016','Rt Hon Nicky Morgan MP', 'Secretary of State for Education and Minister for Women and Equalities','Department for Education','Q224','Q296'])
items.append(['Wednesday 10 February 2016','Nick Boles MP', 'Minister for Skills', 'Department for Business, Innovation and Skills','Q224','Q296'])
df=pd.DataFrame(items,columns=['Date','Name','Role','Org','Qmin','Qmax'])
#Cleaning check
df['Org']=df['Org'].str.strip()
df['n_qmin']=df['Qmin'].str.strip('Q').astype(int)
df['n_qmax']=df['Qmax'].str.strip('Q').astype(int)
df['session']=df['Qmin']+'-'+df['n_qmax'].astype(str)
df.head()
#url='https://publications.parliament.uk/pa/cm201516/cmselect/cmwomeq/584/58415.htm'
all_written=[]
if url_written is not None:
r=requests.get('{}/{}'.format(stub[0],url_written))
pagesoup=BeautifulSoup(r.content, 'html.parser')
for p in pagesoup.select('p[class="EvidenceList1"]'):
#print(p)
#Get rid of span tags
for match in p.select('span[class="EvidenceList1Span"]'):
match.extract()
all_written.append((p.contents[1].strip('()').strip(), p.find('a')['href'],p.find('a').text))
written_df=pd.DataFrame(all_written)
written_df.columns=['Org','URL','RefNumber']
written_df.head()
def getSession(q):
return df[(df['n_qmin']<=q) & (df['n_qmax']>=q)].iloc[0]['session']
getSession(33)
#Report on sessions that included a question by count
df_qs=pd.DataFrame(qs, columns=['qn'])
df_qs['session']=df_qs['qn'].apply(lambda x: getSession(int(x.strip('Q'))) )
s_qs_cnt=df_qs['session'].value_counts()
s_qs_cnt
pd.concat([s_qs_cnt,df.groupby('session')['Org'].apply(lambda x: '; '.join(list(x)))],
axis=1).sort_values('session',ascending=False)
#Written evidence
df_ws=pd.DataFrame(ws,columns=['RefNumber'])
df_ws=df_ws.merge(written_df, on='RefNumber')
df_ws['Org'].value_counts().head()
#Organisations that gave written and witness evidence
set(df_ws['Org']).intersection(set(df['Org']))
#Note there are more matches that are hidden by dirty data
#- e.g. NUT and National Union of Teachers are presumably the same
#- e.g. F1 Recruitment and Search and F1 Recruitment Ltd are presumably the same
url='https://publications.parliament.uk/pa/cm201617/cmselect/cmwomeq/963/96302.htm'
#Inconsistency across different reports in terms of presentation, linking to evidence
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The goal, as before, is to build a model to describe the decision boundary between the "fast" and "slow" categories in our label column. We are going to skip the background information on the SVM for now, but you can read about how it works on Wikipedia. You can also read about the SVC (support vector classifier) on the Scikit Learn pages.
Step2: At this point I want to go back to the last set of classifiers we looked at with this exact same dataset
Step3: We see that there are actually a large number of support points in this model! So, even though it ends up being a linear decision boundary, it has quite a high complexity. Does the improvement in performance justify this increase in complexity? Perhaps...
Step4: Note that the number of support points has dramatically increased but the overall quality of the fit (as measured by the MCC) has decreased! We've added complexity and not improved our performance at all. This is called overfitting and we'll see more examples of it later.
Step5: Now we've reduced the number of parameters without reducing the MCC very much at all. At this point it doesn't matter how much bigger we make C, the SVC algorithm can't do any better.
Step6: This looks a lot like the linear boundary! We'll try changing the values to see what they do.
Step7: Reducing the penalty looks like it added a bit of curvature, but it increased the number of support points and lowered the MCC. Let's try the other way.
Step8: So we've reduced the number of support points and increased the performance of the model! The boundary is no longer a straight line, but it seems to do better than the straight line did. This is an example of reducing complexity and improving performance. That's the direction we want to go! Let's try the other hyperparameter
Step9: So we've improved performance again! But this time, look at the support points and the decision boundary
Step10: So, looking at all of these together, it looks like our best performance with the simplest model was where we had C=100 and $\gamma$=1. If we want to try to do even better, we could tune the paramters even further by exploring around this point to see if we can do a little better. Later in the course we'll take a look at tools that will simplify doing that optimization for us.
Step11: As a reminder, let's review the linear regression along with its graph.
Step12: Ok, we now try the support vector regression model.
Step13: That performance was worse than the linear model (an RMS error of 0.0 means a perfect fit). Let's try adjusting the hyperparameters. We'll make a function to make it easy to repeat this.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("white")
#Note the new use of the dtype option here. We can directly tell pandas to use the Speed column as a category in one step.
speeddf = pd.read_csv("../Class04/Class04_speed_data.csv",dtype={'Speed':'category'})
#We'll use a different tool to plot the data now that we know how to group the data by a category. This will help us make better combined plots later on.
groups = speeddf.groupby('Speed')
# Plot
trainfig, ax = plt.subplots()
ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
# The next step is to cycle through the groups (based on our categories) and plot each one on the same axis.
for name, group in groups:
ax.plot(group['Grade'], group['Bumpiness'], marker='o', linestyle='', ms=8, label=name)
ax.set_aspect(1)
ax.legend(bbox_to_anchor=(1,0.5))
ax.set_xlabel('Grade')
ax.set_ylabel('Bumpiness')
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
# Split the data into training and testing sets and prepare the features and labels
train, test = train_test_split(speeddf, test_size=0.2, random_state=23)
features_train = train[['Grade','Bumpiness']].values
labels_train = train['Speed'].values
features_test = test[['Grade','Bumpiness']].values
labels_test = test['Speed'].values
# Load the model and fit the data
svcmodel = SVC(kernel='linear')
svcmodel.fit(features_train,labels_train)
# Create our decision boundary mesh
# point in the mesh
x_min = 0.0; x_max = 1.0 # Mesh x size
y_min = 0.0; y_max = 1.0 # Mesh y size
h = .01 # step size in the mesh
xx, yy = np.meshgrid(np.arange(x_min, x_max+h, h), np.arange(y_min, y_max+h, h))
# Prepare the boundary
Zpred = pd.Series(svcmodel.predict(np.c_[xx.ravel(), yy.ravel()]), dtype='category').cat.codes.values
Z = Zpred.reshape(xx.shape)
# First plot our points
testfig1, ax = plt.subplots()
plt.pcolormesh(xx, yy, Z, cmap= plt.cm.cool, alpha=0.1,axes=ax)
ax.set_aspect(1)
# Plot test points
groups = test.groupby('Speed')
# The next step is to cycle through the groups (based on our categories) and plot each one on the same axis.
for name, group in groups:
ax.plot(group['Grade'], group['Bumpiness'], marker='o', linestyle='', ms=8, label=name)
ax.legend(bbox_to_anchor=(1,0.5))
ax.set_xlabel('Grade')
ax.set_ylabel('Bumpiness')
# Let's look at the metrics for this fit:
from sklearn.metrics import confusion_matrix
class_labels = ["slow", "fast"]
y_pred = svcmodel.predict(features_test)
cnf_matrix = confusion_matrix(labels_test, y_pred,labels=class_labels)
print(cnf_matrix)
def show_confusion_matrix(cnf_matrix, class_labels, ax=None):
if not ax:
plt.matshow(cnf_matrix,cmap=plt.cm.YlGn,alpha=0.7)
ax = plt.gca()
else:
ax.matshow(cnf_matrix,cmap=plt.cm.YlGn,alpha=0.7)
ax.set_xlabel('Predicted Label', fontsize=16)
ax.set_xticks(range(0,len(class_labels)))
ax.set_xticklabels(class_labels)
ax.set_ylabel('Actual Label', fontsize=16, rotation=90)
ax.set_yticks(range(0,len(class_labels)))
ax.set_yticklabels(class_labels)
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
for row in range(len(cnf_matrix)):
for col in range(len(cnf_matrix[row])):
ax.text(col, row, cnf_matrix[row][col], va='center', ha='center', fontsize=16)
show_confusion_matrix(cnf_matrix,class_labels)
import sklearn.metrics as metrics
recall_score = metrics.recall_score(labels_test, y_pred,labels=class_labels,average=None)
prec_score = metrics.precision_score(labels_test, y_pred,labels=class_labels,average=None)
f1_score = metrics.f1_score(labels_test, y_pred,labels=class_labels,average=None)
acc_score = metrics.accuracy_score(labels_test, y_pred)
matt_score = metrics.matthews_corrcoef(labels_test, y_pred)
print("Class-dependent Metrics")
print("Sensitivity/Recall Score: {}".format(recall_score))
print("Precision Score: {}".format(prec_score))
print("F1 Score: {}".format(f1_score))
print("\nClass-independent Metrics")
print("Accuracy Score: {}".format(acc_score))
print("Matthews Correlation Coefficient (MCC): {}".format(matt_score))
support_points = features_train[svcmodel.support_]
#We plot the decision boundary and the support points
figsvc1 ,ax = plt.subplots()
ax.set_title("N support points: {}".format(len(support_points)))
ax.pcolormesh(xx, yy, Z, cmap= plt.cm.cool, alpha=0.1)
ax.set_aspect(1)
ax.set_xlabel('Grade')
ax.set_ylabel('Bumpiness')
ax.scatter(x=support_points[:,0],y=support_points[:,1],s=20, facecolors='none', edgecolors='r')
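# Quick check (a sketch; uses the fitted linear svcmodel above): for a
# linear kernel the geometric margin width is 2/||w||, so fewer support
# points generally go with a wider, simpler margin.
print("Margin width: {:.3f}".format(2.0 / np.linalg.norm(svcmodel.coef_)))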
# Start with a value of C=0.05
svcmodel2 = SVC(kernel='linear',C=.05)
svcmodel2.fit(features_train,labels_train)
Z = pd.Series(svcmodel2.predict(np.c_[xx.ravel(), yy.ravel()]), dtype='category').cat.codes.values.reshape(xx.shape)
support_points = features_train[svcmodel2.support_]
fig = plt.figure()
ax1 = fig.add_subplot(121)
y_pred = svcmodel2.predict(features_test)
cnf_matrix = confusion_matrix(labels_test, y_pred,labels=class_labels)
show_confusion_matrix(cnf_matrix,class_labels,ax=ax1)
matt_score = metrics.matthews_corrcoef(labels_test, y_pred)
print("Matthews Correlation Coefficient (MCC): {}".format(matt_score))
ax2 = fig.add_subplot(122)
# First plot our points
ax2.pcolormesh(xx, yy, Z, cmap= plt.cm.cool, alpha=0.1)
ax2.set_title("N support points: {}".format(len(support_points)))
ax2.set_aspect(1)
ax2.set_xlabel('Grade')
ax2.set_ylabel('Bumpiness')
# Circle the support points
ax2.scatter(x=support_points[:,0],y=support_points[:,1],s=20, facecolors='none', edgecolors='r')
svcmodel3 = SVC(kernel='linear',C=1000)
svcmodel3.fit(features_train,labels_train)
# Prepare the boundary
Z = pd.Series(svcmodel3.predict(np.c_[xx.ravel(), yy.ravel()]), dtype='category').cat.codes.values.reshape(xx.shape)
support_points = features_train[svcmodel3.support_]
fig2 = plt.figure()
ax1 = fig2.add_subplot(121)
y_pred = svcmodel3.predict(features_test)
cnf_matrix = confusion_matrix(labels_test, y_pred,labels=class_labels)
show_confusion_matrix(cnf_matrix,class_labels,ax=ax1)
matt_score = metrics.matthews_corrcoef(labels_test, y_pred)
print("Matthews Correlation Coefficient (MCC): {}".format(matt_score))
ax2 = fig2.add_subplot(122)
# First plot our points
ax2.pcolormesh(xx, yy, Z, cmap= plt.cm.cool, alpha=0.1)
ax2.set_aspect(1)
ax2.set_title("N support points: {}".format(len(support_points)))
ax2.set_xlabel('Grade')
ax2.set_ylabel('Bumpiness')
# Circle the support points
ax2.scatter(x=support_points[:,0],y=support_points[:,1],s=20, facecolors='none', edgecolors='r')
svcmodel4 = SVC(kernel='rbf',C=1, gamma=1)
svcmodel4.fit(features_train,labels_train)
y_pred = svcmodel4.predict(features_test)
# Prepare the boundary
Z = pd.Series(svcmodel4.predict(np.c_[xx.ravel(), yy.ravel()]), dtype='category').cat.codes.values.reshape(xx.shape)
support_points = features_train[svcmodel4.support_]
fig4 = plt.figure()
ax1 = fig4.add_subplot(121)
cnf_matrix = confusion_matrix(labels_test, y_pred,labels=class_labels)
show_confusion_matrix(cnf_matrix,class_labels,ax=ax1)
matt_score = metrics.matthews_corrcoef(labels_test, y_pred)
print("Matthews Correlation Coefficient (MCC): {}".format(matt_score))
ax2 = fig4.add_subplot(122)
# First plot our points
ax2.pcolormesh(xx, yy, Z, cmap= plt.cm.cool, alpha=0.1)
ax2.set_title("N support points: {}".format(len(support_points)))
ax2.set_aspect(1)
ax2.set_xlabel('Grade')
ax2.set_ylabel('Bumpiness')
# Circle the support points
ax2.scatter(x=support_points[:,0],y=support_points[:,1],s=20, facecolors='none', edgecolors='r')
svcmodel5 = SVC(kernel='rbf',C=0.1, gamma=1)
svcmodel5.fit(features_train,labels_train)
y_pred = svcmodel5.predict(features_test)
# Prepare the boundary
Z = pd.Series(svcmodel5.predict(np.c_[xx.ravel(), yy.ravel()]), dtype='category').cat.codes.values.reshape(xx.shape)
support_points = features_train[svcmodel5.support_]
fig5 = plt.figure()
ax1 = fig5.add_subplot(121)
cnf_matrix = confusion_matrix(labels_test, y_pred,labels=class_labels)
show_confusion_matrix(cnf_matrix,class_labels,ax=ax1)
matt_score = metrics.matthews_corrcoef(labels_test, y_pred)
print("Matthews Correlation Coefficient (MCC): {}".format(matt_score))
ax2 = fig5.add_subplot(122)
# First plot our points
ax2.pcolormesh(xx, yy, Z, cmap= plt.cm.cool, alpha=0.1)
ax2.set_title("N support points: {}".format(len(support_points)))
ax2.set_aspect(1)
ax2.set_xlabel('Grade')
ax2.set_ylabel('Bumpiness')
# Circle the support points
ax2.scatter(x=support_points[:,0],y=support_points[:,1],s=20, facecolors='none', edgecolors='r')
def testsvcmodel(inputC,inputgamma):
svcmodelf = SVC(kernel='rbf',C=inputC, gamma=inputgamma)
svcmodelf.fit(features_train,labels_train)
y_pred = svcmodelf.predict(features_test)
# Prepare the boundary
Z = pd.Series(svcmodelf.predict(np.c_[xx.ravel(), yy.ravel()]), dtype='category').cat.codes.values.reshape(xx.shape)
support_points = features_train[svcmodelf.support_]
figf = plt.figure()
ax1 = figf.add_subplot(121)
cnf_matrix = confusion_matrix(labels_test, y_pred,labels=class_labels)
show_confusion_matrix(cnf_matrix,class_labels,ax=ax1)
matt_score = metrics.matthews_corrcoef(labels_test, y_pred)
print("Matthews Correlation Coefficient (MCC): {}".format(matt_score))
ax2 = figf.add_subplot(122)
# First plot our points
ax2.pcolormesh(xx, yy, Z, cmap= plt.cm.cool, alpha=0.1)
ax2.set_title("N support points: {}".format(len(support_points)))
ax2.set_aspect(1)
ax2.set_xlabel('Grade')
ax2.set_ylabel('Bumpiness')
# Circle the support points
ax2.scatter(x=support_points[:,0],y=support_points[:,1],s=10, facecolors='none', edgecolors='r')
# Now we just need to call the function!
testsvcmodel(inputC=100,inputgamma=1)
testsvcmodel(inputC=100,inputgamma=50)
testsvcmodel(inputC=1,inputgamma=50)
testsvcmodel(inputC=0.1,inputgamma=50)
testsvcmodel(inputC=100,inputgamma=0.1)
testsvcmodel(inputC=0.1,inputgamma=0.1)
fakedata2 = pd.read_csv('../Class02/Class02_fakedata2.csv')
faketrain2, faketest2 = train_test_split(fakedata2, test_size=0.2, random_state=23)
faketrain2.plot.scatter(x='input',y='output')
faketest2.plot.scatter(x='input',y='output')
from sklearn.linear_model import LinearRegression
regr = LinearRegression()
features = faketrain2[['input']].values
labels = faketrain2['output'].values
regr.fit(features,labels)
testinputs = faketest2[['input']].values
predictions = regr.predict(testinputs)
actuals = faketest2['output'].values
print("RMS Error: {0:.3f}".format( np.sqrt(np.mean((predictions - actuals) ** 2))))
# Prepare the model line
X_plot =np.linspace(0, 1, 1000)
Y_pred = regr.predict(X_plot[:,None])
trainfig, ax = plt.subplots()
# First plot our points
ax.scatter(x=testinputs, y=actuals)
ax.plot(X_plot,Y_pred,c='r')
from sklearn.svm import SVR
svrmodel = SVR(C=1.0,gamma=1.0)
svrmodel.fit(features,labels)
svrpredictions = svrmodel.predict(testinputs)
print("RMS Error: {0:.3f}".format( np.sqrt(np.mean((svrpredictions - actuals) ** 2))))
# Prepare the model line
Y_pred = svrmodel.predict(X_plot[:,None])
trainfig, ax = plt.subplots()
# First plot our points
ax.scatter(x=testinputs, y=actuals)
ax.plot(X_plot,Y_pred,c='r')
def svrtest(inputC, inputgamma):
svrmodel = SVR(C=inputC,gamma=inputgamma)
svrmodel.fit(features,labels)
svrpredictions = svrmodel.predict(testinputs)
# Prepare the model line
Y_pred = svrmodel.predict(X_plot[:,None])
trainfig, ax = plt.subplots()
ax.set_title("RMS Error: {0:.3f}".format( np.sqrt(np.mean((svrpredictions - actuals) ** 2))))
# First plot our points
ax.scatter(x=testinputs, y=actuals)
ax.plot(X_plot,Y_pred,c='r')
svrtest(100,1)
svrtest(0.1,1)
svrtest(1,100)
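# One further sweep point (an exploratory assumption, same helper as above).
svrtest(100,100)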
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cccr-iitm', 'iitm-esm', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
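# Example entry (illustrative only -- replace with the real author):
# DOC.set_author("Jane Doe", "jane.doe@example.org")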
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Deep Convolutional Generative Adversarial Network (DCGAN)
Step2: Load and prepare the dataset
Step3: Create the models
Step4: Use the (as yet untrained) generator to create an image.
Step5: The Discriminator
Step6: Use the (as yet untrained) discriminator to classify the generated image as real or fake. The model will be trained to output positive values for real images and negative values for fake images.
Step7: Define the loss and optimizers
Step8: Discriminator loss
Step9: Generator loss
Step10: The discriminator and the generator optimizers are different since the two networks are trained separately.
Step11: Save checkpoints
Step12: Define the training loop
Step13: The training loop begins with the generator receiving a random seed as input. That seed is used to produce an image. The discriminator is then used to classify real images (drawn from the training set) and fake images (produced by the generator). The loss is calculated for each of these models, and the gradients are used to update the generator and discriminator.
Step14: Generate and save images
Step15: Train the model
Step16: Restore the latest checkpoint.
Step17: Create a GIF
Step18: Use imageio to create an animated GIF from the images saved during training.
Step19: If you are working in Colab, you can download the animation with the code below:
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install tensorflow-gpu==2.0.0-rc1
import tensorflow as tf
tf.__version__
# Install imageio to create GIFs.
!pip install imageio
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
from tensorflow.keras import layers
import time
from IPython import display
(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5 # Normalize the images to [-1, 1].
BUFFER_SIZE = 60000
BATCH_SIZE = 256
# Batch and shuffle the data.
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
def make_generator_model():
model = tf.keras.Sequential()
model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Reshape((7, 7, 256)))
assert model.output_shape == (None, 7, 7, 256) # Note: None is the batch size.
model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
assert model.output_shape == (None, 7, 7, 128)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
assert model.output_shape == (None, 14, 14, 64)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
assert model.output_shape == (None, 28, 28, 1)
return model
generator = make_generator_model()
noise = tf.random.normal([1, 100])
generated_image = generator(noise, training=False)
plt.imshow(generated_image[0, :, :, 0], cmap='gray')
def make_discriminator_model():
model = tf.keras.Sequential()
model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
input_shape=[28, 28, 1]))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Flatten())
model.add(layers.Dense(1))
return model
discriminator = make_discriminator_model()
decision = discriminator(generated_image)
print (decision)
# This method returns a helper function to compute the cross entropy loss.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def discriminator_loss(real_output, fake_output):
real_loss = cross_entropy(tf.ones_like(real_output), real_output)
fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
total_loss = real_loss + fake_loss
return total_loss
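# The discriminator's loss compares its decisions on real images to an array
# of 1s and its decisions on generated (fake) images to an array of 0s.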
def generator_loss(fake_output):
return cross_entropy(tf.ones_like(fake_output), fake_output)
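# The generator's loss quantifies how well it tricked the discriminator:
# if the generator is doing well, the discriminator classifies its fakes as real (1s).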
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
EPOCHS = 50
noise_dim = 100
num_examples_to_generate = 16
# We will reuse this seed over time
# (so it's easier to visualize progress in the animated GIF).
seed = tf.random.normal([num_examples_to_generate, noise_dim])
# Notice the use of `tf.function`.
# This annotation causes the function to be "compiled".
@tf.function
def train_step(images):
noise = tf.random.normal([BATCH_SIZE, noise_dim])
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(noise, training=True)
real_output = discriminator(images, training=True)
fake_output = discriminator(generated_images, training=True)
gen_loss = generator_loss(fake_output)
disc_loss = discriminator_loss(real_output, fake_output)
gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
def train(dataset, epochs):
for epoch in range(epochs):
start = time.time()
for image_batch in dataset:
train_step(image_batch)
# Produce images for the GIF as we go.
display.clear_output(wait=True)
generate_and_save_images(generator,
epoch + 1,
seed)
# Save the model every 15 epochs.
if (epoch + 1) % 15 == 0:
checkpoint.save(file_prefix = checkpoint_prefix)
print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))
# Generate after the final epoch.
display.clear_output(wait=True)
generate_and_save_images(generator,
epochs,
seed)
def generate_and_save_images(model, epoch, test_input):
# Notice `training` is set to False.
# This is so all layers run in inference mode (batchnorm).
predictions = model(test_input, training=False)
fig = plt.figure(figsize=(4,4))
for i in range(predictions.shape[0]):
plt.subplot(4, 4, i+1)
plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
plt.axis('off')
plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
plt.show()
%%time
train(train_dataset, EPOCHS)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
# Display a single image using the epoch number.
def display_image(epoch_no):
return PIL.Image.open('image_at_epoch_{:04d}.png'.format(epoch_no))
display_image(EPOCHS)
anim_file = 'dcgan.gif'
with imageio.get_writer(anim_file, mode='I') as writer:
filenames = glob.glob('image*.png')
filenames = sorted(filenames)
last = -1
for i,filename in enumerate(filenames):
frame = 2*(i**0.5)
if round(frame) > round(last):
last = frame
else:
continue
image = imageio.imread(filename)
writer.append_data(image)
image = imageio.imread(filename)
writer.append_data(image)
import IPython
if IPython.version_info > (6,2,0,''):
display.Image(filename=anim_file)
try:
from google.colab import files
except ImportError:
pass
else:
files.download(anim_file)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2>Elementary operations</h2>
Step2: <h2>Algebra</h2>
Step3: <h2>Calculus</h2>
Step5: Illustrating Taylor series
Step6: With this function defined, we can now use it for any sympy function or expression
Step7: This shows easily how a Taylor series is useless beyond its convergence radius, illustrated by
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import display
from sympy.interactive import printing
printing.init_printing(use_latex='mathjax')
from __future__ import division
import sympy as sym
from sympy import *
x, y, z = symbols("x y z")
k, m, n = symbols("k m n", integer=True)
f, g, h = map(Function, 'fgh')
Rational(3,2)*pi + exp(I*x) / (x**2 + y)
exp(I*x).subs(x,pi).evalf()
e = x + 2*y
srepr(e)
exp(pi * sqrt(163)).evalf(50)
eq = ((x+y)**2 * (x+1))
eq
expand(eq)
a = 1/x + (x*sin(x) - 1)/x
a
simplify(a)
eq = Eq(x**3 + 2*x**2 + 4*x + 8, 0)
eq
solve(eq, x)
a, b = symbols('a b')
Sum(6*n**2 + 2**n, (n, a, b))
limit((sin(x)-x)/x**3, x, 0)
(1/cos(x)).series(x, 0, 6)
diff(cos(x**2)**2 / (1+x), x)
integrate(x**2 * cos(x), (x, 0, pi/2))
eqn = Eq(Derivative(f(x),x,x) + 9*f(x), 1)
display(eqn)
dsolve(eqn, f(x))
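# Optional sanity check (illustrative, not part of the original notebook):
# checkodesol substitutes the solution back into the ODE; (True, 0) means
# the solution satisfies the equation.
sol = dsolve(eqn, f(x))
checkodesol(eqn, sol)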
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# You can change the default figure size to be a bit larger if you want,
# uncomment the next line for that:
#plt.rc('figure', figsize=(10, 6))
def plot_taylor_approximations(func, x0=None, orders=(2, 4), xrange=(0,1), yrange=None, npts=200):
'''Plot the Taylor series approximations to a function at various orders.
Parameters
----------
func : a sympy function
x0 : float
Origin of the Taylor series expansion. If not given, x0=xrange[0].
orders : list
List of integers with the orders of Taylor series to show. Default is (2, 4).
xrange : 2-tuple or array.
Either an (xmin, xmax) tuple indicating the x range for the plot (default is (0, 1)),
or the actual array of values to use.
yrange : 2-tuple
(ymin, ymax) tuple indicating the y range for the plot. If not given,
the full range of values will be automatically used.
npts : int
Number of points to sample the x range with. Default is 200.
'''
if not callable(func):
raise ValueError('func must be callable')
if isinstance(xrange, (list, tuple)):
x = np.linspace(float(xrange[0]), float(xrange[1]), npts)
else:
x = xrange
if x0 is None: x0 = x[0]
xs = sym.Symbol('x')
# Make a numpy-callable form of the original function for plotting
fx = func(xs)
f = sym.lambdify(xs, fx, modules=['numpy'])
# We could use latex(fx) instead of str(), but matploblib gets confused
# with some of the (valid) latex constructs sympy emits. So we play it safe.
plt.plot(x, f(x), label=str(fx), lw=2)
# Build the Taylor approximations, plotting as we go
apps = {}
for order in orders:
app = fx.series(xs, x0, n=order).removeO()
apps[order] = app
# Must be careful here: if the approximation is a constant, we can't
# blindly use lambdify as it won't do the right thing. In that case,
# evaluate the number as a float and fill the y array with that value.
if isinstance(app, sym.numbers.Number):
y = np.zeros_like(x)
y.fill(app.evalf())
else:
fa = sym.lambdify(xs, app, modules=['numpy'])
y = fa(x)
tex = sym.latex(app).replace('$', '')
plt.plot(x, y, label=r'$n=%s:\, %s$' % (order, tex) )
# Plot refinements
if yrange is not None:
plt.ylim(*yrange)
plt.grid()
plt.legend(loc='best').get_frame().set_alpha(0.8)
plot_taylor_approximations(sin, 0, [2, 4, 6], (0, 2*pi), (-2,2))
plot_taylor_approximations(cos, 0, [2, 4, 6], (0, 2*pi), (-2,2))
# For an expression made from elementary functions, we must first make it into
# a callable function, the simplest way is to use the Python lambda construct.
plot_taylor_approximations(lambda x: 1/cos(x), 0, [2,4,6], (0, 2*pi), (-5,5))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: let's create a new bucket
Step2: everything is better with labels
Step3: Upload a nice foto there
Step4: The Photo is there on the internet!
|
<ASSISTANT_TASK:>
Python Code:
#some help
!gsutil --help
#list my buckets
#(projectId has been set in the gcloud config
# you can use the parameter -p projectId instead )
!gsutil ls
#more details
!gsutil ls -L
#some help
!gsutil mb --help
#creating a new bucket of class regional in region europe-west2
!gsutil mb -c regional -l europe-west2 gs://a-brand-new-bucket-toto/
#checking
!gsutil ls
#some help
!gsutil label --help
#setting label
!gsutil label ch -l env:test gs://a-brand-new-bucket-toto/
#getting the labels
!gsutil label get gs://a-brand-new-bucket-toto/
#what does the help say?
!gsutil cp --help
#let's copy Formentera.JPG there
!gsutil cp "Formentera.JPG" gs://a-brand-new-bucket-toto/
#checking the result
!gsutil ls gs://a-brand-new-bucket-toto/
#the file is there let's have a look
!gsutil ls -l gs://a-brand-new-bucket-toto/Formentera.JPG
#with more details
!gsutil ls -L gs://a-brand-new-bucket-toto/Formentera.JPG
#let's make it public!
!gsutil acl ch -u AllUsers:R gs://a-brand-new-bucket-toto/Formentera.JPG
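# Once public, the object is served at the standard public URL scheme:
# https://storage.googleapis.com/a-brand-new-bucket-toto/Formentera.JPG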
#let's move it
!gsutil mv gs://a-brand-new-bucket-toto/Formentera.JPG gs://a-brand-new-bucket-toto/formentera.jpeg
#check the result
!gsutil ls gs://a-brand-new-bucket-toto
#delete the file
!gsutil rm gs://a-brand-new-bucket-toto/formentera.jpeg
#deleting the bucket
!gsutil rm -r gs://a-brand-new-bucket-toto
#check the result
!gsutil ls
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Convenience function to set up a graph
Step2: Implementation
Step3: Show individual waves, assuming $s=0$ at $t=0$ ($\theta = 0$)
Step4: Show the damping of the higher frequencies at different values of $x$.
Step5: In the plot, each wave is also delayed, and more so the larger $x$ is. To remove this delay and better show the effect of filtering out the higher frequencies, multiply $a$ by zero within the $\sin$ function.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
def newfig(title='?', xlabel='?', ylabel='?', xlim=None, ylim=None,
xscale='linear', yscale='linear', size_inches=(14, 8)):
'''Setup a new axis for plotting'''
fig, ax = plt.subplots()
fig.set_size_inches(size_inches)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xscale(xscale)
ax.set_yscale(yscale)
if xlim is not None: ax.set_xlim(xlim)
if ylim is not None: ax.set_ylim(ylim)
ax.grid(True)
return ax
kD = 600 # m2/d
S = 0.1 # [-]
# a sequence of angular velocities
omega = np.array([1, 3, 5, 7, 11, 13, 17, 19, 23, 29]) / (2 * np.pi) # cycles per day
# some random initial delays
theta = np.random.rand(len(omega)) + 0.5
A = 1.0 # m, Wave amplitude
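# Each component wave follows s(x, t) = A * exp(-a*x) * sin(omega*t - a*x + theta),
# with damping coefficient a = sqrt(omega * S / (2 * kD)): the higher the
# frequency omega, the larger a, so high frequencies die out over shorter distances.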
t = 0.35
x = np.linspace(0, 1500, 101)
ax = newfig('Damped waves, with different angular velocities', 'x [m]', 's(x, t) [[m]]')
for omega_, theta_ in zip(omega, theta):
a = np.sqrt(omega_ * S / (2 * kD))
y = A * np.exp(-a * x) * np.sin(omega_ * t * 0 - a * x + 0 * theta_)
ax.plot(x, y, label="$2 \pi \omega$ = {:.1f}".format(omega_ * 2 * np.pi))
ax.legend()
x =[ 0, 100, 200, 300]
t = 24 * np.linspace(0, 1, 24*60 + 1) # every minute
ax = newfig("Superimposed waves at different locations $x$", 'time [d]', 's [m]')
for xi in x:
y = np.zeros_like(t)
for omega_i, theta_i in zip(omega, theta):
a = np.sqrt(omega_i * S /(2 * kD))
y += A * np.exp(-a * xi) * np.sin(omega_i * t - a * xi + theta_i)
ax.plot(t, y, label='x = {:.0f} m'.format(xi))
ax.legend()
x = np.linspace(0, 200, 201)
omega = np.array([1, 5, 25, 125]) / (2 * np.pi) # cycles per day
ax = newfig("Superimposed evenlopes of the waves $x$", 'x [m]', 's [m]')
y_env_tot = np.zeros_like(x)
for omega_i in omega:
a = np.sqrt(omega_i * S /(2 * kD))
y_env = A * np.exp(-a * x)
ln = ax.plot(x, y_env, label="$2 \pi \omega$ = {:.1f}".format(omega_i * 2 * np.pi))
ax.plot(x, -y_env, ln[0].get_color())
y_env_tot += y_env
ax.plot(x, y_env_tot, 'k', lw=2, label='total evelope, sum of the 4 individual ones')
ax.plot(x, -y_env_tot, 'k', lw=2)
ax.legend(loc='right')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 7.1 Write checksums
Step2: 7.1 Write the results to file
|
<ASSISTANT_TASK:>
Python Code:
import os
import yaml
import json
from download_and_process_functions import get_sha_hash
output_path = 'output'
files = [
'conventional_power_plants_DE.csv', 'conventional_power_plants_DE.xlsx',
'conventional_power_plants_EU.csv', 'conventional_power_plants_EU.xlsx',
'conventional_power_plants.sqlite']
hash_dict = {}
filesize_dict = {}
with open('checksums.txt', 'w') as f:
for file_name in files:
file_hash = get_sha_hash(os.path.join(output_path, file_name))
hash_dict[file_name] = file_hash
filesize_dict[file_name] = os.path.getsize(os.path.join(output_path, file_name))
f.write('{},{}\n'.format(file_name, file_hash))
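# Optional sanity check (illustrative): re-read checksums.txt and confirm each
# recorded hash still matches a freshly computed one.
with open('checksums.txt') as f:
    for line in f:
        name, recorded = line.strip().split(',')
        assert get_sha_hash(os.path.join(output_path, name)) == recorded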
with open(os.path.join('input', 'metadata.yml'), 'r') as f:
metadata = yaml.load(f.read(), Loader=yaml.BaseLoader)
metadata['resources'][0]['hash'] = hash_dict['conventional_power_plants_DE.csv']
metadata['resources'][1]['hash'] = hash_dict['conventional_power_plants_DE.xlsx']
metadata['resources'][2]['hash'] = hash_dict['conventional_power_plants_EU.csv']
metadata['resources'][3]['hash'] = hash_dict['conventional_power_plants_EU.xlsx']
metadata['resources'][0]['bytes'] = filesize_dict['conventional_power_plants_DE.csv']
metadata['resources'][1]['bytes'] = filesize_dict['conventional_power_plants_DE.xlsx']
metadata['resources'][2]['bytes'] = filesize_dict['conventional_power_plants_EU.csv']
metadata['resources'][3]['bytes'] = filesize_dict['conventional_power_plants_EU.xlsx']
datapackage_json = json.dumps(metadata, indent=4, separators=(',', ': '))
# Write the information of the metadata
with open(os.path.join('output', 'datapackage.json'), 'w') as f:
f.write(datapackage_json)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Specify the response and predictor columns
Step2: Train Deep Learning model and validate on test set
Step3: Inspect the model in Flow
Step4: Extracting and Handling the Results
Step5: Note
Step6: Clearly, the model parameters aren't tuned perfectly yet, as 4-5% test set error is rather large.
Step7: Predicting
Step8: Variable Importance
Step9: Model Comparison with Grid Search
Step10: Random Grid Search
Step11: Model Checkpoints
Step12: Specify a model and a file path. The default path is the current working directory.
Step13: After restarting H2O, you can load the saved model by specifying the host and model file path.
Step14: You can also use the following commands to retrieve a model from its H2O key.
|
<ASSISTANT_TASK:>
Python Code:
import h2o
h2o.init(nthreads=-1)
import os.path
PATH = os.path.expanduser("~/h2o-3/")
test_df = h2o.import_file(PATH + "bigdata/laptop/mnist/test.csv.gz")
train_df = h2o.import_file(PATH + "bigdata/laptop/mnist/train.csv.gz")
y = "C785"
x = train_df.names[0:784]
train_df[y] = train_df[y].asfactor()
test_df[y] = test_df[y].asfactor()
from h2o.estimators.deepwater import H2ODeepWaterEstimator
model = H2ODeepWaterEstimator(
distribution="multinomial",
activation="rectifier",
mini_batch_size=128,
hidden=[1024,1024],
hidden_dropout_ratios=[0.5,0.5], ## for better generalization
input_dropout_ratio=0.1,
sparse=True, ## can result in speedup for sparse data
epochs=10) ## need more epochs for a better model
model.train(
x=x,
y=y,
training_frame=train_df,
validation_frame=test_df
)
model.scoring_history()
model.model_performance(train=True) # training metrics
model.model_performance(valid=True) # validation metrics
model_crossvalidated = H2ODeepWaterEstimator(
distribution="multinomial",
activation="rectifier",
mini_batch_size=128,
hidden=[1024,1024],
hidden_dropout_ratios=[0.5,0.5],
input_dropout_ratio=0.1,
sparse=True,
epochs=10,
nfolds=3
)
model_crossvalidated.train(
x=x,
y=y,
training_frame=train_df
)
# View specified parameters of the Deep Learning model
model_crossvalidated.params;
# Examine the trained model
model_crossvalidated
## Validation error of the original model (using a train/valid split)
model.mean_per_class_error(valid=True)
## Training error of the model trained on 100% of the data
model_crossvalidated.mean_per_class_error(train=True)
## Estimated generalization error of the cross-validated model
model_crossvalidated.mean_per_class_error(xval=True)
#ls ../../h2o-docs/src/booklets/v2_2015/source/images/
predictions = model_crossvalidated.predict(test_df)
predictions.describe()
# Train Deep Learning model and validate on test set and save the variable importances
from h2o.estimators.deeplearning import H2ODeepLearningEstimator ## H2ODeepWaterEstimator doesn't yet have variable importances
model_variable_importances = H2ODeepLearningEstimator(
distribution="multinomial",
activation="RectifierWithDropout", ## shortcut for hidden_dropout_ratios=[0.5,0.5,0.5]
hidden=[32,32,32], ## smaller number of neurons to be fast enough on the CPU
input_dropout_ratio=0.1,
sparse=True,
epochs=1, ## not interested in a good model here
variable_importances=True) ## this is not yet implemented for DeepWaterEstimator
model_variable_importances.train(
x=x,
y=y,
training_frame=train_df,
validation_frame=test_df)
# Retrieve the variable importance
import pandas as pd
pd.DataFrame(model_variable_importances.varimp())
model_variable_importances.varimp_plot(num_of_features=20)
from h2o.grid.grid_search import H2OGridSearch
hyper_parameters = {
"hidden":[[200,200,200],[300,300]],
"learning_rate":[1e-3,5e-3],
}
model_grid = H2OGridSearch(H2ODeepWaterEstimator, hyper_params=hyper_parameters)
model_grid.train(
x=x,
y=y,
distribution="multinomial",
epochs=50, ## might stop earlier since we enable early stopping below
training_frame=train_df,
validation_frame=test_df,
score_interval=2, ## score no more than every 2 seconds
score_duty_cycle=0.5, ## score up to 50% of the time - to enable early stopping
score_training_samples=1000, ## use a subset of the training frame for faster scoring
score_validation_samples=1000, ## use a subset of the validation frame for faster scoring
stopping_rounds=3,
stopping_tolerance=0.05,
stopping_metric="misclassification",
sparse = True,
mini_batch_size=256
)
# print model grid search results
model_grid
for gmodel in model_grid:
print(gmodel.model_id + " mean per class error: " + str(gmodel.mean_per_class_error()))
import pandas as pd
grid_results = pd.DataFrame([[m.model_id, m.mean_per_class_error(valid=True)] for m in model_grid])
grid_results
hyper_parameters = {
"hidden":[[1000,1000],[2000]],
"learning_rate":[s*1e-3 for s in range(30,100)],
"momentum_start":[s*1e-3 for s in range(0,900)],
"momentum_stable":[s*1e-3 for s in range(900,1000)],
}
search_criteria = {"strategy":"RandomDiscrete", "max_models":10, "max_runtime_secs":100, "seed":123456}
model_grid_random_search = H2OGridSearch(H2ODeepWaterEstimator,
hyper_params=hyper_parameters,
search_criteria=search_criteria)
model_grid_random_search.train(
x=x, y=y,
distribution="multinomial",
epochs=50, ## might stop earlier since we enable early stopping below
training_frame=train_df,
validation_frame=test_df,
score_interval=2, ## score no more than every 2 seconds
score_duty_cycle=0.5, ## score up to 50% of the wall clock time - scoring is needed for early stopping
score_training_samples=1000, ## use a subset of the training frame for faster scoring
score_validation_samples=1000, ## use a subset of the validation frame for faster scoring
stopping_rounds=3,
stopping_tolerance=0.05,
stopping_metric="misclassification",
sparse = True,
mini_batch_size=256)
grid_results = pd.DataFrame([[m.model_id, m.mean_per_class_error(valid=True)] for m in model_grid_random_search])
grid_results
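# Illustrative follow-up: retrieve the best model found by the random search
# (column 0 holds the model id, column 1 its validation error).
best_id = grid_results.sort_values(by=1).iloc[0, 0]
best_model = h2o.get_model(best_id)
best_model.mean_per_class_error(valid=True)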
# Re-start the training process on a saved DL model using the ‘checkpoint‘ argument
model_checkpoint = H2ODeepWaterEstimator(
checkpoint=model.model_id,
activation="rectifier",
distribution="multinomial",
mini_batch_size=128,
hidden=[1024,1024],
hidden_dropout_ratios=[0.5,0.5],
input_dropout_ratio=0.1,
sparse=True,
epochs=20) ## previous model had 10 epochs, so we need to only train for 10 more to get to 20 epochs
model_checkpoint.train(
x=x,
y=y,
training_frame=train_df,
validation_frame=test_df)
model_checkpoint.scoring_history()
model_path = h2o.save_model(
model = model,
#path = "/tmp/mymodel",
force = True)
print model_path
!ls -lah $model_path
# Load model from disk
saved_model = h2o.load_model(model_path)
# Retrieve model by H2O key
model = h2o.get_model(model_id=model_checkpoint._id)
model
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 9900 genes in the network. 4181 of those are always expressed.
|
<ASSISTANT_TASK:>
Python Code:
print(aces_gene_names[:10])
alist = list(aces_gene_names[:10])
gn1 = 'Entrez_5982'
gn2 = 'Entrez_76'
print(alist.index(gn1))
print(alist.index(gn2))
aces_gene_names = list(aces_gene_names)
edges_set = set([]) # (gene_idx_1, gene_idx_2)
# gene_idx_1 < gene_idx_2
# idx in aces_gene_names, starting at 0
with open('ACES/experiments/data/I2D_edges_0411.sif') as f:
for line in f:
ls = line.split()
gene_name_1 = 'Entrez_%s' % ls[0]
gene_name_2 = 'Entrez_%s' % ls[2]
# Exclude self edges
if gene_name_1 == gene_name_2:
continue
try:
gene_idx_1 = aces_gene_names.index(gene_name_1)
gene_idx_2 = aces_gene_names.index(gene_name_2)
except ValueError:
continue
if gene_idx_1 < gene_idx_2:
e = (gene_idx_1, gene_idx_2)
else:
e = (gene_idx_2, gene_idx_1)
edges_set.add(e)
f.close()
np.savetxt('I2D_edges.txt', np.array([list(x) for x in list(edges_set)]), fmt='%d')
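# Each row of I2D_edges.txt is one undirected edge (i, j) with i < j, where
# i and j are 0-based indices into aces_gene_names.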
len(edges_set)
edges_list = np.array([list(x) for x in edges_set])
genes_in_network = set(np.array([list(x) for x in list(edges_set)]).flatten())
print(len(genes_in_network))
len(set(np.where(np.sum(X_zeroed, axis=0)==0)[0]).intersection(genes_in_network))
X_binary = np.where(X<-1, 0, 1)
float(np.count_nonzero(X_binary))/(X.shape[1]*X.shape[1])
np.count_nonzero(X_binary)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, import the relevant modules
Step2: Step 1
Step3: Step 2
Step4: Here, we formulate a FindConnection query with "CML" as the input_obj, "imatinib" as the output_obj. We further specify with the intermediate_nodes parameter that we are looking for paths joining chronic myelogenous leukemia and imatinib with one intermediate node that is a Gene. (The ability to search for longer reasoning paths that include additional intermediate nodes will be added shortly.)
Step5: We next execute the connect method, which performs the query path planning and query path execution process. In short, BioThings Explorer is deconstructing the query into individual API calls, executing those API calls, then assembling the results.
Step6: Step 3
Step7: While most results are based on edges from semmed, edges from DGIdb, biolink, disgenet, mydisease.info and drugcentral were also retrieved from their respective APIs.
Step8: Not surprisingly, the top two genes that BioThings Explorer found that join imatinib to CML are ABL1 and BCR, the two genes that are fused in the "Philadelphia chromosome", the genetic abnormality that underlies CML, and the validated target of imatinib.
Step9: Comparing results between CML and GIST
|
<ASSISTANT_TASK:>
Python Code:
%%capture
!pip install git+https://github.com/biothings/biothings_explorer#egg=biothings_explorer
# import modules from biothings_explorer
from biothings_explorer.hint import Hint
from biothings_explorer.user_query_dispatcher import FindConnection
import nest_asyncio
nest_asyncio.apply()
ht = Hint()
# find all potential representations of CML
cml_hint = ht.query("chronic myelogenous leukemia")
# select the correct representation of CML
cml = cml_hint['Disease'][0]
cml
# find all potential representations of imatinib
imatinib_hint = ht.query("imatinib")
# select the correct representation of imatinib
imatinib = imatinib_hint['ChemicalSubstance'][0]
imatinib
help(FindConnection.__init__)
fc = FindConnection(input_obj=cml, output_obj=imatinib, intermediate_nodes='Gene')
# set verbose=True will display all steps which BTE takes to find the connection
fc.connect(verbose=True)
df = fc.display_table_view()
# because UMLS is not currently well-integrated in our ID-to-object translation system, removing UMLS-only entries here
patternDel = "^UMLS:C\d+"
filter = df.node1_id.str.contains(patternDel)
df = df[~filter]
print(df.shape)
df.sample(10)
df.node1_name.value_counts().head(10)
# fetch all articles connecting 'chronic myelogenous leukemia' and 'ABL1'
articles = []
for info in fc.display_edge_info('chronic myelogenous leukemia', 'ABL1').values():
if 'pubmed' in info['info']:
articles += info['info']['pubmed']
print("There are "+str(len(articles))+" articles supporting the edge between CML and ABL1. Sampling of 10 of those:")
x = [print("http://pubmed.gov/"+str(x)) for x in articles[0:10] ]
# fetch all articles connecting 'ABL1' and 'Imatinib
articles = []
for info in fc.display_edge_info('ABL1', 'imatinib').values():
if 'pubmed' in info['info']:
articles += info['info']['pubmed']
print("There are "+str(len(articles))+" articles supporting the edge between ABL1 and imatinib. Sampling of 10 of those:")
x = [print("http://pubmed.gov/"+str(x)) for x in articles[0:10] ]
ht = Hint()
# find all potential representations of CML
gist_hint = ht.query("gastrointestinal stromal tumor")
# select the correct representation of CML
gist = gist_hint['Disease'][0]
gist
fc = FindConnection(input_obj=gist, output_obj=imatinib, intermediate_nodes='Gene')
fc.connect(verbose=False) # skipping the verbose log here
df = fc.display_table_view()
# because UMLS is not currently well-integrated in our ID-to-object translation system, removing UMLS-only entries here
patternDel = "^UMLS:C\d+"
filter = df.node1_id.str.contains(patternDel)
df = df[~filter]
print(df.shape)
df.sample(10)
df.node1_name.value_counts().head(10)
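# For GIST, one would expect KIT and PDGFRA -- imatinib's validated targets in
# this disease -- to rank near the top of this list.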
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this chapter, we are going to take a look at how to perform statistical inference on graphs.
Step2: You can verify that approximately 20% of the $\frac{30^2 - 30}{2} = 435$ possible edges are present.
Step3: We can also look at the degree distribution
Step4: Barabasi-Albert Graph
Step5: And the degree distribution
Step6: You can see that even though the number of edges in the two graphs is similar, the degree distributions are very different.
Step7: As is always the case, let's make sure we know some basic stats of the graph.
Step8: Let's also examine the degree distribution of the graph.
Step9: Finally, we should visualize the graph to get a feel for it.
Step10: One thing we might infer from this visualization is that most nodes have a small degree, while a handful of hub nodes have a very high degree.
Step11: Comparison with Erdos-Renyi graphs
Step14: Given the degree distribution only, which model do you think better describes the generation of a protein-protein interaction network?
Step15: Notice that because the graphs are instantiated in a non-deterministic fashion, re-running the cell above will give you different values for each new graph generated.
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import YouTubeVideo
YouTubeVideo(id="P-0CJpO3spg", width="100%")
import networkx as nx
G_er = nx.erdos_renyi_graph(n=30, p=0.2)
nx.draw(G_er)
len(G_er.edges())
len(G_er.edges()) / 435
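# With p=0.2 and 435 possible node pairs, we expect about 0.2 * 435 = 87 edges on average.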
import pandas as pd
from nams.functions import ecdf
import matplotlib.pyplot as plt
x, y = ecdf(pd.Series(dict(nx.degree(G_er))))
plt.scatter(x, y)
G_ba = nx.barabasi_albert_graph(n=30, m=3)
nx.draw(G_ba)
len(G_ba.edges())
x, y = ecdf(pd.Series(dict(nx.degree(G_ba))))
plt.scatter(x, y)
from nams import load_data as cf
G = cf.load_propro_network()
for n, d in G.nodes(data=True):
G.nodes[n]["degree"] = G.degree(n)
len(G.nodes())
len(G.edges())
x, y = ecdf(pd.Series(dict(nx.degree(G))))
plt.scatter(x, y)
import nxviz as nv
from nxviz import annotate
nv.circos(G, sort_by="degree", node_color_by="degree", node_enc_kwargs={"size_scale": 10})
annotate.node_colormapping(G, color_by="degree")
from ipywidgets import interact, IntSlider
m = IntSlider(value=2, min=1, max=10)
@interact(m=m)
def compare_barabasi_albert_graph(m):
fig, ax = plt.subplots()
G_ba = nx.barabasi_albert_graph(n=len(G.nodes()), m=m)
x, y = ecdf(pd.Series(dict(nx.degree(G_ba))))
ax.scatter(x, y, label="Barabasi-Albert Graph")
x, y = ecdf(pd.Series(dict(nx.degree(G))))
ax.scatter(x, y, label="Protein Interaction Network")
ax.legend()
from ipywidgets import FloatSlider
p = FloatSlider(value=0.06, min=0, max=0.1, step=0.001)
@interact(p=p)
def compare_erdos_renyi_graph(p):
fig, ax = plt.subplots()
G_er = nx.erdos_renyi_graph(n=len(G.nodes()), p=p)
x, y = ecdf(pd.Series(dict(nx.degree(G_er))))
ax.scatter(x, y, label="Erdos-Renyi Graph")
x, y = ecdf(pd.Series(dict(nx.degree(G))))
ax.scatter(x, y, label="Protein Interaction Network")
ax.legend()
ax.set_title(f"p={p}")
from scipy.stats import wasserstein_distance
def erdos_renyi_degdist(n, p):
'''Return a Pandas series of degree distribution of an Erdos-Renyi graph.'''
G = nx.erdos_renyi_graph(n=n, p=p)
return pd.Series(dict(nx.degree(G)))
def barabasi_albert_degdist(n, m):
'''Return a Pandas series of degree distribution of a Barabasi-Albert graph.'''
G = nx.barabasi_albert_graph(n=n, m=m)
return pd.Series(dict(nx.degree(G)))
deg = pd.Series(dict(nx.degree(G)))
er_deg = erdos_renyi_degdist(n=len(G.nodes()), p=0.001)
ba_deg = barabasi_albert_degdist(n=len(G.nodes()), m=1)
wasserstein_distance(deg, er_deg), wasserstein_distance(deg, ba_deg)
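# A smaller Wasserstein distance means the simulated degree distribution sits
# closer to the real one; the Barabasi-Albert graph typically wins this comparison.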
import matplotlib.pyplot as plt
from tqdm.autonotebook import tqdm
er_dist = []
ba_dist = []
for _ in tqdm(range(100)):
er_deg = erdos_renyi_degdist(n=len(G.nodes()), p=0.001)
er_dist.append(wasserstein_distance(deg, er_deg))
ba_deg = barabasi_albert_degdist(n=len(G.nodes()), m=1)
ba_dist.append(wasserstein_distance(deg, ba_deg))
# er_degs = [erdos_renyi_degdist(n=len(G.nodes()), p=0.001) for _ in range(100)]
import seaborn as sns
import janitor
data = (
pd.DataFrame(
{
"Erdos-Renyi": er_dist,
"Barabasi-Albert": ba_dist,
}
)
.melt(value_vars=["Erdos-Renyi", "Barabasi-Albert"])
.rename_columns({"variable": "Graph Model", "value": "Wasserstein Distance"})
)
sns.swarmplot(data=data, x="Graph Model", y="Wasserstein Distance")
<END_TASK>
|