text_prompt
stringlengths 168
30.3k
| code_prompt
stringlengths 67
124k
|
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Read Data
Step2: 2.2 Global Temperature Anomalies
Step3: 2.3 Merge CO2 and GTA into one dataframe
Step4: 3. Visualize CO2 and GTA in one figure
Step5: 3.2 Call the function to create plot
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from matplotlib import pyplot as plt
%matplotlib inline
# Set some parameters to apply to all plots. These can be overridden
import matplotlib
# Plot size to 14" x 7"
matplotlib.rc('figure', figsize = (14, 7))
# Font size to 14
matplotlib.rc('font', size = 14)
# Do not display top and right frame lines
matplotlib.rc('axes.spines', top = False, right = False)
# Remove grid lines
matplotlib.rc('axes', grid = False)
# Set background color to white
matplotlib.rc('axes', facecolor = 'white')
# Monthly mean CO2 from Mauna Loa Observatory (whitespace-delimited text file);
# -99.99 and -1 are the file's missing-value sentinels.
# NOTE(review): 'data\co2_mm_mlo.txt' is a Windows-style backslash path; it only
# works because '\c' is not a recognized escape sequence -- confirm, and prefer a
# forward slash or os.path.join for portability.
co2 = pd.read_csv('data\co2_mm_mlo.txt',
skiprows=72,
header=None,
comment = "#",
delim_whitespace = True,
names = ["year", "month", "decimal_date", "average", "interpolated", "trend", "days"],
na_values =[-99.99, -1])
# Build a YYYYMM integer, parse it into a datetime index, then keep only the
# 'interpolated' CO2 column (everything else is dropped below).
co2['YM'] = co2['year']*100 + co2['month']
co2['YM'] = pd.to_datetime(co2['YM'], format='%Y%m', errors='ignore')
co2.set_index('YM', inplace=True)
co2.drop(["year", "month", "decimal_date", "average", "trend", "days"], axis=1, inplace=True)
# Global temperature anomalies (1958-2018); the first 5 rows are header text.
gta = pd.read_csv('data\gta_1958_2018.csv',
sep=",",
skiprows=5,
names = ["YM", "GTA"])
gta['YM'] = pd.to_datetime(gta['YM'], format='%Y%m', errors='ignore')
gta.set_index('YM', inplace=True)
# Join on the shared datetime index: one dataframe holding both CO2 and GTA.
co2gta = co2.join(gta)
def lineplot2y(x_data, x_label, y1_data, y1_color, y1_label, y2_data, y2_color, y2_label, title):
    """Plot two series against one shared x-axis using twin y-axes.

    The first series is drawn on the left axis, the second on a twinned
    right axis; each y-axis label is tinted with its series color so the
    reader can match line to scale at a glance.
    """
    # Left-hand axis: first series plus the shared x label and title.
    _, left_axis = plt.subplots()
    left_axis.plot(x_data, y1_data, color=y1_color)
    left_axis.set_ylabel(y1_label, color=y1_color)
    left_axis.set_xlabel(x_label)
    left_axis.set_title(title)
    # Right-hand axis shares x with the left one (twinx) and carries
    # the second series on its own scale.
    right_axis = left_axis.twinx()
    right_axis.plot(x_data, y2_data, color=y2_color)
    right_axis.set_ylabel(y2_label, color=y2_color)
    # The file-wide rc config hides the right spine; restore it here so
    # the second scale has a visible frame line.
    right_axis.spines['right'].set_visible(True)
# Plot CO2 concentration and global temperature anomaly in one figure with
# twin y-axes (CO2 on the left, GTA on the right).
# Bug fix: corrected the user-facing axis label typo "Anoamly" -> "Anomaly".
lineplot2y(x_data = co2gta.index
           , x_label = 'Month-Year'
           , y1_data = co2gta['interpolated']
           , y1_color = '#539caf'
           , y1_label = 'CO2(ppm)'
           , y2_data = co2gta['GTA']
           , y2_color = '#7663b0'
           , y2_label = 'Global Temperature Anomaly ($^oC$)'
           , title = 'CO2 vs. GTA')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Cap avant
Step2: Cap arrere
Step3: Esquerra
Step4: Dreta
Step5: Tot correcte? Fàcil, no? Anem a comprovar-ho amb el primer exercici, però abans de canviar de pàgina, recordeu desconnectar el robot
|
<ASSISTANT_TASK:>
Python Code:
from functions import connect, forward, backward, stop, left, right, disconnect
from time import sleep
robot = 12 # canvieu pel número del vostre robot
connect(robot) # i executeu, polsant Majúscules + Enter
forward()
sleep(1)
stop()
backward()
sleep(1)
stop()
left()
sleep(1)
stop()
right()
sleep(1)
stop()
disconnect()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Installing PyCupid is as easy as
Step2: We load an Orion-KL FITS from ALMA SV with astropy
Step3: Let's import PyCupid for doing some serious data science
Step4: Inspecting the algorithms interactively
Step5: Deep inside into fellwalker
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as mpl
%matplotlib inline
# just for presentation purposes
import warnings
warnings.filterwarnings("ignore")
%%bash
pip install pycupid
from astropy.io import fits
hdulist = fits.open('FITS/ALMA00000034.fits')
hdulist.info()
print('\nTarget Object: '+hdulist[0].header['OBJECT'])
orion = hdulist[0]
tmp = orion.copy()
orion.header
import aplpy
fig = aplpy.FITSFigure(orion)
fig.show_colorscale(cmap='gist_heat')
fig.add_colorbar()
fig.show_grid()
fig.show_contour(colors='white')
# CUPID Clumping algorithms
from pycupid import clumpfind, fellwalker, gaussclumps, reinhold
from ipywidgets import interact, fixed, IntSlider, FloatSlider
# data as a NumPy array (Don't run this twice)
data = orion.data.sum(axis=(0,1))
data = np.flip(data,0)
def run_cupid(data, algorithm=fellwalker, rms=0.1, config=None):
    """Run one CUPID clumping algorithm on *data* and plot input vs. result.

    Left panel: the original Orion image. Right panel: the clump map
    returned by *algorithm*, with the background level zeroed out.
    """
    # Run the chosen clumping algorithm; zero the minimum so background
    # pixels render as empty in the colorscale.
    clump_map = algorithm(data, rms, config)
    clump_map[clump_map == clump_map.min()] = 0.
    # Two APLpy panels drawn side by side inside a single figure.
    canvas = mpl.figure(figsize=(15, 7))
    # Left panel: the raw image with grid and white contours.
    left_panel = aplpy.FITSFigure(orion, figure=canvas, subplot=[0.1, 0.1, 0.35, 0.8], auto_refresh=True)
    left_panel.show_colorscale(cmap='gist_heat')
    left_panel.show_grid()
    left_panel.show_contour(colors='white')
    # Right panel: write the (reoriented) clump map into the scratch HDU
    # so APLpy can render it with the original WCS.
    tmp.data[0, 0, :, :] = np.flip(clump_map.T, 0)
    right_panel = aplpy.FITSFigure(tmp, figure=canvas, subplot=[0.53, 0.1, 0.35, 0.8], auto_refresh=True)
    right_panel.show_colorscale(cmap='magma')
    right_panel.show_grid()
    canvas.canvas.draw()
    mpl.show()
algorithms = {'gaussclumps':gaussclumps, 'clumpfind':clumpfind,
'fellwalker':fellwalker, 'reinhold':reinhold}
interact(run_cupid,
data = fixed(data),
algorithm = algorithms,
rms = FloatSlider(min=data.min(), max=data.max()*0.2, step=0.01, value=0.05),
config = fixed(None)
);
def run_fellwalker(data, RMS=0.05, CLEANITER=None, FLATSLOPE=None, MAXBAD=None, MINDIP=None,
                   MINHEIGHT=None, MINPIX=None, MAXJUMP=None):
    """Run CUPID's FellWalker on *data* and plot input next to the clump map.

    Any tuning parameter left at None is omitted from the config dict so
    FellWalker falls back to its built-in default for that setting.
    """
    # Collect only the explicitly-set tuning parameters.
    # Bug fix: the original stored MINHEIGHT under the misspelled key
    # 'MINHEIFHT', so that slider's value was silently ignored by FellWalker.
    candidates = {'CLEANITER': CLEANITER, 'FLATSLOPE': FLATSLOPE, 'MAXBAD': MAXBAD,
                  'MINDIP': MINDIP, 'MINHEIGHT': MINHEIGHT, 'MINPIX': MINPIX,
                  'MAXJUMP': MAXJUMP}
    config = {key: value for key, value in candidates.items() if value is not None}
    out = fellwalker(data, RMS, config)
    out[out == out.min()] = 0.  # zero the background level for plotting
    # Side-by-side plot: original image (left) and clump assignments (right).
    fig = mpl.figure(figsize=(15, 7))
    fig1 = aplpy.FITSFigure(orion, figure=fig, subplot=[0.1, 0.1, 0.35, 0.8], auto_refresh=True)
    fig1.show_colorscale(cmap='gist_heat')
    fig1.show_grid()
    fig1.show_contour(colors='white')
    # Reorient the clump map to match the input image before rendering.
    tmp.data[0, 0, :, :] = np.flip(out.T, 0)
    fig2 = aplpy.FITSFigure(tmp, figure=fig, subplot=[0.53, 0.1, 0.35, 0.8], auto_refresh=True)
    fig2.show_colorscale(cmap='magma')
    fig2.show_grid()
    fig.canvas.draw()
    mpl.show()
interact(run_fellwalker,
data = fixed(data),
RMS = FloatSlider(min=data.min(), max=data.max()*0.2, step=0.01, value=0.05),
CLEANITER = IntSlider(min=1, max=10, step=1, value=1),
FLATSLOPE = FloatSlider(min=0, max=data.max()*0.25, step=0.01, value=0.05),
MAXBAD = FloatSlider(min=0., max=1., step=0.01, value=0.05),
MINDIP = FloatSlider(min=0., max=data.max()*0.25, step=0.01, value=0.05),
MINHEIGHT = FloatSlider(min=0., max=data.max()*0.25, step=0.01, value=0.05),
MINPIX = IntSlider(min=0, max=20, step=1, value=7),
MAXJUMP = IntSlider(min=1, max=10, step=1, value=4)
);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Phased genotypes
Step2: ChrX (phased)
Step3: Unphased genotypes
Step4: Separate biallelic and multiallelic variants, split multiallelic variants with split_multi_hts, and then union_rows the split multiallelic MT back to the biallelic MT.
Step5: After splitting multiallelic variants, we need to extract the appropriate values from the INFO array fields with a_index.
Step6: ChrX (unphased)
Step7: Separate biallelic and multiallelic variants, split multiallelic variants with split_multi_hts, and then union_rows the split multiallelic MT back to the biallelic MT.
Step8: After splitting multiallelic variants, we need to extract the appropriate values from the INFO array fields with a_index.
Step9: ChrY (unphased)
Step10: Separate biallelic and multiallelic variants, split multiallelic variants with split_multi_hts, and then union_rows the split multiallelic MT back to the biallelic MT.
Step11: After splitting multiallelic variants, we need to extract the appropriate values from the INFO array fields with a_index.
Step13: Create/update schemas
|
<ASSISTANT_TASK:>
Python Code:
ht_samples = hl.import_table(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/1000_Genomes_NYGC_30x_samples_ped_population.txt.bgz",
delimiter="\s+",
impute=True
)
ht_samples = ht_samples.annotate(
FatherID = hl.if_else(ht_samples.FatherID == "0",
hl.missing(hl.tstr),
ht_samples.FatherID),
MotherID = hl.if_else(ht_samples.MotherID == "0",
hl.missing(hl.tstr),
ht_samples.MotherID),
Sex = hl.if_else(ht_samples.Sex == 1, "male", "female")
)
ht_samples = ht_samples.key_by("SampleID")
n_rows = ht_samples.count()
n_partitions = ht_samples.n_partitions()
ht_samples = ht_samples.annotate_globals(
metadata=hl.struct(
name="1000_Genomes_HighCov_samples",
n_rows=n_rows,
n_partitions=n_partitions)
)
ht_samples.write("gs://hail-datasets-us/1000_Genomes_NYGC_30x_HighCov_samples.ht", overwrite=False)
ht_samples = hl.read_table("gs://hail-datasets-us/1000_Genomes_NYGC_30x_HighCov_samples.ht")
ht_samples.describe()
mt = hl.import_vcf(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/1000_Genomes_NYGC_30x_phased_chr{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22}_GRCh38.vcf.bgz",
reference_genome="GRCh38"
)
n_rows, n_cols = mt.count()
n_partitions = mt.n_partitions()
mt = mt.annotate_globals(
metadata=hl.struct(
name="1000_Genomes_HighCov_autosomes",
reference_genome="GRCh38",
n_rows=n_rows,
n_cols=n_cols,
n_partitions=n_partitions
)
)
# Get list of INFO fields that are arrays
known_keys = [x[0] for x in list(mt.row.info.items()) if "array" in str(x[1])]
# Extract value from INFO array fields (all arrays are length 1)
mt = mt.annotate_rows(
info = mt.info.annotate(
**{k: hl.or_missing(hl.is_defined(mt.info[k]),
mt.info[k][0])
for k in known_keys}
)
)
mt = mt.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/autosomes_phased_GRCh38.mt",
overwrite=False,
_read_if_exists=True
)
mt = mt.annotate_cols(**ht_samples[mt.s])
mt = hl.sample_qc(mt)
mt = hl.variant_qc(mt)
mt.write("gs://hail-datasets-us/1000_Genomes/NYGC_30x/GRCh38/autosomes_phased.mt", overwrite=False)
mt = hl.read_matrix_table("gs://hail-datasets-us/1000_Genomes/NYGC_30x/GRCh38/autosomes_phased.mt")
mt.describe()
mt = hl.import_vcf(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/1000_Genomes_NYGC_30x_phased_chrX_GRCh38.vcf.bgz",
reference_genome="GRCh38"
)
n_rows, n_cols = mt.count()
n_partitions = mt.n_partitions()
mt = mt.annotate_globals(
metadata=hl.struct(
name="1000_Genomes_HighCov_chrX",
reference_genome="GRCh38",
n_rows=n_rows,
n_cols=n_cols,
n_partitions=n_partitions
)
)
# Get list of INFO fields that are arrays
known_keys = [x[0] for x in list(mt.row.info.items()) if "array" in str(x[1])]
# Extract appropriate value from INFO array fields (all arrays are length 1)
mt = mt.annotate_rows(
info = mt.info.annotate(
**{k: hl.or_missing(hl.is_defined(mt.info[k]),
mt.info[k][0])
for k in known_keys}
)
)
mt = mt.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrX_phased_GRCh38.mt",
overwrite=False,
_read_if_exists=True
)
mt = mt.annotate_cols(**ht_samples[mt.s])
mt = hl.sample_qc(mt)
mt = hl.variant_qc(mt)
mt.write("gs://hail-datasets-us/1000_Genomes/NYGC_30x/GRCh38/chrX_phased.mt", overwrite=False)
mt = hl.read_matrix_table("gs://hail-datasets-us/1000_Genomes/NYGC_30x/GRCh38/chrX_phased.mt")
mt.describe()
mt = hl.import_vcf(
("gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/1000_Genomes_NYGC_30x_"
"chr{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22}_"
"GRCh38.vcf.bgz"),
reference_genome="GRCh38",
array_elements_required=False
)
mt = mt.annotate_entries(
PL = hl.if_else(mt.PL.contains(hl.missing(hl.tint32)),
hl.missing(mt.PL.dtype),
mt.PL)
)
mt = mt.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/autosomes_unphased_GRCh38_imported_vcf.mt",
overwrite=False,
_read_if_exists=True
)
mt = hl.read_matrix_table(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/autosomes_unphased_GRCh38_imported_vcf.mt"
)
bi = mt.filter_rows(hl.len(mt.alleles) == 2)
bi = bi.annotate_rows(a_index=1, was_split=False)
bi = bi.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/autosomes_unphased_GRCh38_biallelic.mt",
overwrite=False,
_read_if_exists=True
)
multi = mt.filter_rows(hl.len(mt.alleles) > 2)
multi = multi.annotate_entries(PL = hl.missing(multi.PL.dtype))
multi = multi.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/autosomes_unphased_GRCh38_multiallelic.mt",
overwrite=False,
_read_if_exists=True
)
split = hl.split_multi_hts(multi, keep_star=True, permit_shuffle=True)
split = split.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/autosomes_unphased_GRCh38_multiallelic_split.mt",
overwrite=False,
_read_if_exists=True
)
unioned = split.union_rows(bi)
unioned = unioned.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/autosomes_unphased_GRCh38_unioned.mt",
overwrite=False,
_read_if_exists=True
)
unioned = unioned.repartition(12000, shuffle=True)
unioned = unioned.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/autosomes_unphased_GRCh38_unioned_repart.mt",
overwrite=False,
_read_if_exists=True
)
unioned = hl.read_matrix_table(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/autosomes_unphased_GRCh38_unioned_repart.mt"
)
# Get list of INFO fields that are arrays
known_keys = [x[0] for x in list(unioned.row.info.items()) if "array" in str(x[1])]
# Extract appropriate values from INFO array fields after splitting
mt = unioned.annotate_rows(
info = unioned.info.annotate(
**{k: hl.or_missing(hl.is_defined(unioned.info[k]),
unioned.info[k][unioned.a_index - 1])
for k in known_keys}
)
)
n_rows, n_cols = mt.count()
n_partitions = mt.n_partitions()
mt = mt.annotate_globals(
metadata=hl.struct(
name="1000_Genomes_HighCov_autosomes",
reference_genome="GRCh38",
n_rows=n_rows,
n_cols=n_cols,
n_partitions=n_partitions
)
)
ht_samples = hl.read_table("gs://hail-datasets-us/1000_Genomes/NYGC_30x/samples.ht")
mt = mt.annotate_cols(**ht_samples[mt.s])
mt = hl.sample_qc(mt)
mt = hl.variant_qc(mt)
mt.write("gs://hail-datasets-us/1000_Genomes/NYGC_30x/GRCh38/autosomes_unphased.mt", overwrite=False)
mt = hl.read_matrix_table("gs://hail-datasets-us/1000_Genomes/NYGC_30x/GRCh38/autosomes_unphased.mt")
mt.describe()
mt = hl.import_vcf(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/1000_Genomes_NYGC_30x_chrX_GRCh38.vcf.bgz",
reference_genome="GRCh38",
array_elements_required=False
)
mt = mt.annotate_entries(
PL = hl.if_else(mt.PL.contains(hl.missing(hl.tint32)),
hl.missing(mt.PL.dtype),
mt.PL)
)
mt = mt.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrX_unphased_GRCh38_imported_vcf.mt",
overwrite=False,
_read_if_exists=True
)
mt = hl.read_matrix_table(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrX_unphased_GRCh38_imported_vcf.mt"
)
bi = mt.filter_rows(hl.len(mt.alleles) == 2)
bi = bi.annotate_rows(a_index=1, was_split=False)
bi = bi.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrX_unphased_GRCh38_biallelic.mt",
overwrite=False,
_read_if_exists=True
)
multi = mt.filter_rows(hl.len(mt.alleles) > 2)
multi = multi.annotate_entries(PL = hl.missing(multi.PL.dtype))
multi = multi.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrX_unphased_GRCh38_multiallelic.mt",
overwrite=False,
_read_if_exists=True
)
split = hl.split_multi_hts(multi, keep_star=True, permit_shuffle=True)
split = split.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrX_unphased_GRCh38_multiallelic_split.mt",
overwrite=False,
_read_if_exists=True
)
unioned = split.union_rows(bi)
unioned = unioned.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrX_unphased_GRCh38_unioned.mt",
overwrite=False,
_read_if_exists=True
)
unioned = unioned.repartition(512, shuffle=True)
unioned = unioned.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrX_unphased_GRCh38_unioned_repart.mt",
overwrite=False,
_read_if_exists=True
)
unioned = hl.read_matrix_table(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrX_unphased_GRCh38_unioned_repart.mt"
)
# Get list of INFO fields that are arrays
known_keys = [x[0] for x in list(unioned.row.info.items()) if "array" in str(x[1])]
# Extract appropriate values from INFO array fields after splitting
mt = unioned.annotate_rows(
info = unioned.info.annotate(
**{k: hl.or_missing(hl.is_defined(unioned.info[k]),
unioned.info[k][unioned.a_index - 1])
for k in known_keys}
)
)
n_rows, n_cols = mt.count()
n_partitions = mt.n_partitions()
mt = mt.annotate_globals(
metadata=hl.struct(
name="1000_Genomes_HighCov_chrX",
reference_genome="GRCh38",
n_rows=n_rows,
n_cols=n_cols,
n_partitions=n_partitions
)
)
ht_samples = hl.read_table("gs://hail-datasets-us/1000_Genomes/NYGC_30x/samples.ht")
mt = mt.annotate_cols(**ht_samples[mt.s])
mt = hl.sample_qc(mt)
mt = hl.variant_qc(mt)
mt.write("gs://hail-datasets-us/1000_Genomes/NYGC_30x/GRCh38/chrX_unphased.mt", overwrite=False)
mt = hl.read_matrix_table("gs://hail-datasets-us/1000_Genomes/NYGC_30x/GRCh38/chrX_unphased.mt")
mt.describe()
mt = hl.import_vcf(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/1000_Genomes_NYGC_30x_chrY_GRCh38.vcf.bgz",
reference_genome="GRCh38",
array_elements_required=False
)
mt = mt.annotate_entries(
PL = hl.if_else(mt.PL.contains(hl.missing(hl.tint32)),
hl.missing(mt.PL.dtype),
mt.PL)
)
mt = mt.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrY_unphased_GRCh38_imported_vcf.mt",
overwrite=False,
_read_if_exists=True
)
mt = hl.read_matrix_table(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrY_unphased_GRCh38_imported_vcf.mt"
)
bi = mt.filter_rows(hl.len(mt.alleles) == 2)
bi = bi.annotate_rows(a_index=1, was_split=False)
bi = bi.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrY_unphased_GRCh38_biallelic.mt",
overwrite=False,
_read_if_exists=True
)
multi = mt.filter_rows(hl.len(mt.alleles) > 2)
multi = multi.annotate_entries(PL = hl.missing(multi.PL.dtype))
multi = multi.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrY_unphased_GRCh38_multiallelic.mt",
overwrite=False,
_read_if_exists=True
)
split = hl.split_multi_hts(multi, keep_star=True, permit_shuffle=True)
split = split.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrY_unphased_GRCh38_multiallelic_split.mt",
overwrite=False,
_read_if_exists=True
)
unioned = split.union_rows(bi)
unioned = unioned.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrY_unphased_GRCh38_unioned.mt",
overwrite=False,
_read_if_exists=True
)
unioned = unioned.repartition(8, shuffle=True)
unioned = unioned.checkpoint(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrY_unphased_GRCh38_unioned_repart.mt",
overwrite=False,
_read_if_exists=True
)
unioned = hl.read_matrix_table(
"gs://hail-datasets-tmp/1000_Genomes_NYGC_30x/checkpoints/chrY_unphased_GRCh38_unioned_repart.mt"
)
# Get list of INFO fields that are arrays
known_keys = [x[0] for x in list(unioned.row.info.items()) if "array" in str(x[1])]
# Extract appropriate values from INFO array fields after splitting
mt = unioned.annotate_rows(
info = unioned.info.annotate(
**{k: hl.or_missing(hl.is_defined(unioned.info[k]),
unioned.info[k][unioned.a_index - 1])
for k in known_keys}
)
)
n_rows, n_cols = mt.count()
n_partitions = mt.n_partitions()
mt = mt.annotate_globals(
metadata=hl.struct(
name="1000_Genomes_HighCov_chrY",
reference_genome="GRCh38",
n_rows=n_rows,
n_cols=n_cols,
n_partitions=n_partitions
)
)
ht_samples = hl.read_table("gs://hail-datasets-us/1000_Genomes/NYGC_30x/samples.ht")
mt = mt.annotate_cols(**ht_samples[mt.s])
mt = hl.sample_qc(mt)
mt = hl.variant_qc(mt)
mt.write("gs://hail-datasets-us/1000_Genomes/NYGC_30x/GRCh38/chrY_unphased.mt", overwrite=False)
mt = hl.read_matrix_table("gs://hail-datasets-us/1000_Genomes/NYGC_30x/GRCh38/chrY_unphased.mt")
mt.describe()
import json
import os
import textwrap
output_dir = os.path.abspath("../../hail/python/hail/docs/datasets/schemas")
datasets_path = os.path.abspath("../../hail/python/hail/experimental/datasets.json")
with open(datasets_path, "r") as f:
datasets = json.load(f)
names = datasets.keys()
for name in [name for name in names if "1000_Genomes_HighCov" in name]:
versions = sorted(set(dataset["version"] for dataset in datasets[name]["versions"]))
if not versions:
versions = [None]
reference_genomes = sorted(set(dataset["reference_genome"] for dataset in datasets[name]["versions"]))
if not reference_genomes:
reference_genomes = [None]
print(name)
# Create schemas for unphased versions, since phased entries only have GT
if name == "1000_Genomes_HighCov_chrY":
v = versions[0]
else:
v = versions[1]
print(v)
print(reference_genomes[0] + "\n")
path = [dataset["url"]["gcp"]["us"]
for dataset in datasets[name]["versions"]
if all([dataset["version"] == v,
dataset["reference_genome"] == reference_genomes[0]])]
assert len(path) == 1
path = path[0]
if path.endswith(".ht"):
table = hl.methods.read_table(path)
table_class = "hail.Table"
else:
table = hl.methods.read_matrix_table(path)
table_class = "hail.MatrixTable"
description = table.describe(handler=lambda x: str(x)).split("\n")
description = "\n".join([line.rstrip() for line in description])
# reST page template for each dataset's schema documentation.
# Bug fix: the triple quotes around this string literal were lost (the bare
# text was a syntax error); restored as a multi-line string whose placeholders
# match the keys of the `context` dict built below.
template = """.. _{dataset}:

{dataset}
{underline1}

* **Versions:** {versions}
* **Reference genome builds:** {ref_genomes}
* **Type:** :class:`{class}`

Schema ({version0}, {ref_genome0})
{underline2}

.. code-block:: text

{schema}
"""
context = {
"dataset": name,
"underline1": len(name) * "=",
"version0": v,
"ref_genome0": reference_genomes[0],
"versions": ", ".join([str(version) for version in versions]),
"ref_genomes": ", ".join([str(reference_genome) for reference_genome in reference_genomes]),
"underline2": len("".join(["Schema (", str(v), ", ", str(reference_genomes[0]), ")"])) * "~",
"schema": textwrap.indent(description, " "),
"class": table_class
}
with open(output_dir + f"/{name}.rst", "w") as f:
f.write(template.format(**context).strip())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Term Frequency and Inverse Document Frequency
Step2: Let us get in how many documents (each title) does the word occur
Step3: Let us compute the tf-idf
Step4: now let us plot a word cloud to see the prominence of the word
Step5: Topic modelling (LDA - Latent Dirichlet allocation)
Step6: Generating the document term matrix
Step7: Loading the vocabulary
Step8: Finding the key words that come together for each topic
Step9: Finding the Topic for each Document
Step10: Sentiment Analysis
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import re
df = pd.read_csv('data_tau_ta.csv')
df.head()
df.shape
import nltk
from nltk.corpus import stopwords
stop = stopwords.words('english')
stop.extend(('.', ',', '"', "'", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}','/','-'))
tokens_list = df['tokens'].tolist()
tokens_list
# Let us get the frequency count
frequency_words = {}
for data in tokens_list:
data = data.replace("[","")
data = data.replace("]","")
data = data.replace("'","")
data_list = data.split(',')
print(data_list)
for token in data_list:
token = token.rstrip()
token = token.lstrip()
if token not in stop:
if token in frequency_words:
count = frequency_words[token]
count = count + 1
frequency_words[token] = count
else:
frequency_words[token] = 1
frequency_words['data']
df_tfidf = pd.DataFrame(data=list(frequency_words.items()),columns=['word','tf'])
df_tfidf.head()
df_tfidf.sort_values(ascending=False, by = "tf", inplace=True)
df_tfidf.head()
def get_documents_count(row):
    """Return the number of documents in `df.tokens` containing row['word'].

    Each entry of `df.tokens` is the string repr of a token list
    ("['a', 'b', ...]"); it is cleaned of quotes/brackets, split on commas,
    and each token stripped before the membership test.
    """
    target = row['word']
    matches = 0
    for raw in df.tokens:
        # Strip the list-literal punctuation, then split into raw tokens.
        cleaned = raw.replace("'", '').replace("[", '').replace("]", '')
        tokens = (piece.strip() for piece in cleaned.split(','))
        if target in tokens:
            matches += 1
    return matches
df_tfidf['document_count'] = df_tfidf.apply(get_documents_count,axis=1)
df_tfidf.head()
df_tfidf.tail()
# we already have the count of all the documents
total_docs = df.shape[0]
total_docs
import math
from wordcloud import WordCloud
import matplotlib.pyplot as plt
%matplotlib inline
def compute_tfidf(row):
    """Return tf-idf for one word row: tf * log10(total_docs / document_count)."""
    # total_docs is the corpus size defined at module level.
    return row['tf'] * math.log10(total_docs / row['document_count'])
df_tfidf['tfidf'] = df_tfidf.apply(compute_tfidf,axis=1)
df_tfidf.head()
df_tfidf.tail()
df_tfidf.sort_values(by='tfidf',ascending=True,inplace=True)
df_tfidf.head()
df_tfidf.replace(to_replace=0.0,value=0.1,inplace=True)
df_tfidf.tail()
df_tfidf.set_index('word', inplace=True)
df_tfidf.head()
wordcloud = WordCloud()
word_tfidf = df_tfidf['tfidf'].to_dict()
wordcloud.generate_from_frequencies(word_tfidf.items())
plt.figure(figsize=(14,10))
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
import lda
import numpy as np
import lda.datasets
import sklearn.feature_extraction.text as text
vectorizer = text.CountVectorizer(input='content', stop_words='english', min_df=1)
dtm = vectorizer.fit_transform(df.title).toarray()
dtm
vocab = np.array(vectorizer.get_feature_names())
vocab[:20]
titles = df.title
model = lda.LDA(n_topics=5, n_iter=500, random_state=1)
model.fit(dtm)
model.topic_word_
topic_word = model.topic_word_
topic_word
n_top_words = 8
for i, topic_dist in enumerate(topic_word):
topic_words = np.array(vocab)[np.argsort(topic_dist)][:-n_top_words:-1]
print('Topic {}: {}'.format(i, ' '.join(topic_words)))
doc_topic = model.doc_topic_
for n in range(10):
topic_most_pr = doc_topic[n].argmax()
print("topic: {} , {}".format(topic_most_pr,titles[n]))
from nltk.classify import NaiveBayesClassifier
import math
import collections
pos_features = []
neg_features = []
def make_full_dict(word):
    """Wrap a single word as a {word: True} feature dict for NLTK classifiers.

    Idiom fix: a dict literal replaces the roundabout dict([(word, True)]).
    """
    return {word: True}
with open('postive_words.txt','r') as posFile:
lines = posFile.readlines()
for line in lines:
pos_features.append([make_full_dict(line.rstrip()),'pos'])
pos_features
with open('negative_words.txt','r',encoding='utf-8') as negFile:
lines = negFile.readlines()
for line in lines:
neg_features.append([make_full_dict(line.rstrip()),'neg'])
neg_features
len(pos_features),len(neg_features)
trainFeatures = pos_features + neg_features
trainFeatures
classifier = NaiveBayesClassifier.train(trainFeatures)
referenceSets = collections.defaultdict(set)
testSets = collections.defaultdict(set)
def make_full_dict_sent(words):
    """Map every token in *words* to True — a bag-of-words feature dict for NLTK.

    Idiom fix: a dict comprehension replaces dict([(w, True) for w in words]);
    duplicate tokens collapse to a single key exactly as before.
    """
    return {word: True for word in words}
import re
neg_test = 'I hate data science'
title_words = re.findall(r"[\w']+|[.,!?;]",
'The Daily Mail stole My Visualization, Twice')
title_words
test=[]
test.append([make_full_dict_sent(title_words),''])
test
for i, (features, label) in enumerate(test):
predicted = classifier.classify(features)
print(predicted)
for doc in df.title:
title_words = re.findall(r"[\w']+|[.,!?;]", doc.lower())
test = []
test.append([make_full_dict_sent(title_words),''])
for i, (features, label) in enumerate(test):
predicted = classifier.classify(features)
print(predicted,doc)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Data object is initialized with the path to the directory of .pickle files. On creation it reads in the pickle files, but does not transform the data.
Step2: The data object can be accessed like a dictionary to the underlying Dataframes. These will be transformed on their first access into a normalized form. (This might take awhile for the first access)
|
<ASSISTANT_TASK:>
Python Code:
import ICO
import os
import pandas as pd
import time
data = ICO.Data(os.getcwd()+'/data/')
start = time.time()
data["all_encounter_data"]
print(time.time() - start)
data["all_encounter_data"].describe(include='all')
data["all_encounter_data"].columns.values
data['all_encounter_data'].shape[0]
data['all_encounter_data'].to_pickle('all_encounter_data_Dan_20170415.pickle')
start = time.time()
data["all_person_data"]
print(time.time() - start)
data["all_person_data"].describe(include='all')
data["all_person_data"].columns.values
data['all_person_data'].shape[0]
data['all_person_data'].to_pickle('all_person_data_Dan_20170415.pickle')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1:
Step2: Notice that orig_time is None, because we haven't specified it. In
Step3: Since the example data comes from a Neuromag system that starts counting
Step4: If you know that your annotation onsets are relative to some other time, you
Step5: <div class="alert alert-info"><h4>Note</h4><p>If your annotations fall outside the range of data times in the
Step6: The three annotations appear as differently colored rectangles because they
Step7: The colored rings are clickable, and determine which existing label will be
Step8: Notice that it is possible to create overlapping annotations, even when they
Step9: You can also iterate over the annotations within an
Step10: Note that iterating, indexing and slicing
Step11: Reading and writing Annotations to/from a file
|
<ASSISTANT_TASK:>
Python Code:
import os
from datetime import timedelta
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
raw.crop(tmax=60).load_data()
my_annot = mne.Annotations(onset=[3, 5, 7],
duration=[1, 0.5, 0.25],
description=['AAA', 'BBB', 'CCC'])
print(my_annot)
raw.set_annotations(my_annot)
print(raw.annotations)
# convert meas_date (a tuple of seconds, microseconds) into a float:
meas_date = raw.info['meas_date']
orig_time = raw.annotations.orig_time
print(meas_date == orig_time)
time_of_first_sample = raw.first_samp / raw.info['sfreq']
print(my_annot.onset + time_of_first_sample)
print(raw.annotations.onset)
time_format = '%Y-%m-%d %H:%M:%S.%f'
new_orig_time = (meas_date + timedelta(seconds=50)).strftime(time_format)
print(new_orig_time)
later_annot = mne.Annotations(onset=[3, 5, 7],
duration=[1, 0.5, 0.25],
description=['DDD', 'EEE', 'FFF'],
orig_time=new_orig_time)
raw2 = raw.copy().set_annotations(later_annot)
print(later_annot.onset)
print(raw2.annotations.onset)
fig = raw.plot(start=2, duration=6)
fig.canvas.key_press_event('a')
new_annot = mne.Annotations(onset=3.75, duration=0.75, description='AAA')
raw.set_annotations(my_annot + new_annot)
raw.plot(start=2, duration=6)
print(raw.annotations[0]) # just the first annotation
print(raw.annotations[:2]) # the first two annotations
print(raw.annotations[(3, 2)]) # the fourth and third annotations
for ann in raw.annotations:
descr = ann['description']
start = ann['onset']
end = ann['onset'] + ann['duration']
print("'{}' goes from {} to {}".format(descr, start, end))
# later_annot WILL be changed, because we're modifying the first element of
# later_annot.onset directly:
later_annot.onset[0] = 99
# later_annot WILL NOT be changed, because later_annot[0] returns a copy
# before the 'onset' field is changed:
later_annot[0]['onset'] = 77
print(later_annot[0]['onset'])
raw.annotations.save('saved-annotations.csv')
annot_from_file = mne.read_annotations('saved-annotations.csv')
print(annot_from_file)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's pick a descriptor. Allowed types are
Step2: and load the databases with the descriptors (input) and the correct charge densities (output). Databases are quite big, so we can decide how many samples to use for training.
Step3: Training
Step4: The evaluation function computes the distance between each validation sample $T_i$ and the training ones $M_i$
|
<ASSISTANT_TASK:>
Python Code:
# INITIAL DEFINITIONS
from pyKRR import KRRsolver # import our KRR solver object
import numpy, random
import numpy, math, random
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.sparse import load_npz
# TYPE is the descriptor type
TYPE = "cnt"
#show descriptor details
print("\nDescriptor details")
desc = open("./data/descriptor."+TYPE.split('.')[0]+".txt","r").readlines()
for l in desc: print(l.strip())
print(" ")
# load input/output data
trainIn = load_npz("./data/energy.input."+TYPE+".npz").toarray()
trainOut = numpy.load("./data/energy.output.npy")
trainIn = trainIn.astype(dtype=numpy.float64, casting='safe')
# decide how many samples to take from the database
samples = min(trainIn.shape[0], 1000) # change the number here!
vsamples = min(trainIn.shape[0]-samples,1000)
print("training samples: "+str(samples))
print("validation samples: "+str(vsamples))
# split between training and validation
validIn = trainIn[samples:samples+vsamples]
validOut = trainOut[samples:samples+vsamples]
trainIn = trainIn[0:samples]
trainOut = trainOut[0:samples]
# show the first few descriptors
print("\nDescriptors for the first 5 molecules:")
print(trainIn[0:5])
# create a new solver
solver = KRRsolver()
# set the regularisation hyperparameter
solver.alpha = 0.1
# call its training function with the training inputs and outputs
# WARNING: building the kernel matrix is O(N^2)
# WARNING: inverting the kernel matris is O(N^3)
# Keep the training set small
solver.Train(trainIn, trainOut)
# get a list of predicted outputs for the validation inputs
predict = solver.Evaluate(validIn)
print("Mean Abs Error (validation): " + str((numpy.abs(predict-validOut)).mean()))
# do the regression plot
plt.plot(validOut, predict, 'o')
plt.plot([numpy.min(validOut),numpy.max(validOut)], [numpy.min(validOut),numpy.max(validOut)], '-')
plt.xlabel('correct output')
plt.ylabel('KRR output')
plt.show()
# check the distribution of energies in the training set
plt.hist(validOut, bins=20, density=True, alpha=0.5, facecolor='gray')
plt.hist(trainOut, bins=20, density=True, alpha=0.2, facecolor='red')
plt.xlabel("Energy [H]")
plt.ylabel("Probability")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Q2.
Step2: Q3.
Step3: Q4
|
<ASSISTANT_TASK:>
Python Code:
## Solution 1.
import numpy as np
A = np.array([[0, 0, 1, 0, 0, 1, 0, 0], #A
[0, 0, 0, 0, 1, 0, 0, 1], #B
[1, 0, 0, 1, 0, 1, 0, 0], #C
[0, 0, 1, 0, 1, 0, 1, 0], #D
[0, 1, 0, 1, 0, 0, 0, 1], #E
[1, 0, 1, 0, 0, 0, 1, 0], #F
[0, 0, 0, 1, 0, 1, 0, 1], #G
[0, 1, 0, 0, 1, 0, 1, 0] #H
])
print "Adjacent Matrix A:"
print A
print "Zero elements in A:"
print A.shape[1]* A.shape[0] - A.sum()
print "Sum as well as Non zero elements in A:"
print A.sum()
print "=========================================="
D = np.diag([2,2,3,3,3,3,3,3])
print "Degree Matrix D:"
print D
print "=========================================="
L = D - A
print "Laplacian Matrix L:"
print L
import numpy as np
A = np.array([[0, 1, 1, 0, 0, 0],
[1, 0, 0, 1, 0, 0],
[1, 0, 0, 1, 0, 0],
[0, 1, 1, 0, 1, 0],
[0, 0, 0, 1, 0, 1],
[0, 1, 0, 0, 1, 0]
])
D = np.diag([2, 2, 2, 3, 2, 2])
L = (D - A)
print "=========================================="
print "Graph Laplacian Matrix: "
print L
values, vectors = np.linalg.eig(L)
print "Eigen values: "
values = np.around(values, decimals=4)
print values
print "=========================================="
print "Eigen vectors: "
vectors = np.around(vectors, decimals=4)
print vectors
print "=========================================="
print "Second Smallest Eigen vectors: "
print vectors[:, 2]
print "=========================================="
print "Mean of each row in vectors: "
print np.mean(vectors[:, 2])
def cal_supprise(current_t = 75):
    """Estimate the 'surprise number' (second frequency moment) for the
    toy element stream of this exercise at time step ``current_t``.

    ``more_set`` elements have occurred ``common + 1`` times and
    ``less_set`` elements have occurred ``common`` times; the moment is
    the sum of squared counts.
    """
    # Use floor division so the arithmetic stays integral on Python 3 as
    # well; with plain ``/`` Python 3 makes ``common`` a float and breaks
    # the count computation (the file is otherwise Python-2 style).
    common = ( current_t // 10 ) % 10
    more_set = ( current_t ) % 10
    less_set = 10 - more_set
    return more_set * (common+1)**2 + less_set * (common**2)
def AMS(time_list, current_t = 75):
    """Alon-Matias-Szegedy estimates of the second moment at ``current_t``.

    ``time_list`` holds the arrival times of the sampled elements; each
    sample contributes ``current_t * (2*count - 1)`` where ``count`` is the
    number of occurrences of that element since it was sampled.

    Returns the list of per-sample estimates (average them for the final
    AMS estimate).
    """
    estimates = []
    for t in time_list:
        delta = current_t - t
        elem = t % 10
        threshold = current_t % 10
        # Floor division keeps ``common`` an int on Python 3 (plain ``/``
        # would yield a float and corrupt the estimate).
        common = (delta // 10) % 10
        if elem > threshold:
            # Element has completed one extra full cycle of 10 steps.
            estimates.append( ( 2*common - 1)*current_t )
        else:
            estimates.append( ( 2*common + 1)*current_t )
    return estimates
print cal_supprise()
print np.mean(AMS([31,32,44]))
print np.mean(AMS([14,35,42]))
print np.mean(AMS([32,48,50]))
print np.mean(AMS([22,42,62]))
#buffer
def hash1(x):
    """Hash function h(x) = (3*x + 7) mod 11 used by the Flajolet-Martin exercise."""
    return (7 + 3 * x) % 11
def printBinary(x):
    """Print the binary representation of integer ``x`` (e.g. 0b101 for 5)."""
    # print() call form runs on both Python 2 and 3 for a single string
    # argument; the original Python-2 print statement is a SyntaxError on 3.
    print("- The binary result of {0} is:".format(str(x)) + str(bin(x)))
def countTrailingZerosInBinary(num):
    """Count how many trailing zero bits ``bin(num)`` ends with."""
    tail = 0
    for ch in reversed(bin(num)):
        if ch != '0':
            break
        tail += 1
    return tail
def FlagoletMatrtin(hash_list):
    """Flajolet-Martin distinct-count estimate: 2**R, where R is the
    maximum number of trailing zero bits over the hashed values.
    """
    longest_run = 0
    for val in hash_list:
        bits = bin(val)
        zeros = len(bits) - len(bits.rstrip('0'))
        if zeros > longest_run:
            longest_run = zeros
    return 2 ** longest_run
print FlagoletMatrtin(([hash1(x) for x in [1,3,6,8]]))
print FlagoletMatrtin(([hash1(x) for x in [2,4,6,10]]))
print FlagoletMatrtin(([hash1(x) for x in [2,6,8,10]]))
print FlagoletMatrtin(([hash1(x) for x in [3,4,8,10]]))
print "================="
print FlagoletMatrtin(([hash1(x) for x in [2,6,8,9]]))
print FlagoletMatrtin(([hash1(x) for x in [4,6,9,10]]))
print FlagoletMatrtin(([hash1(x) for x in [1,5,8,9]]))
print FlagoletMatrtin(([hash1(x) for x in [1,6,7,10]]))
print "================="
print FlagoletMatrtin(([hash1(x) for x in [1,2,3,9]]))
print FlagoletMatrtin(([hash1(x) for x in [1,3,9,10]]))
print FlagoletMatrtin(([hash1(x) for x in [3,4,8,10]]))
print FlagoletMatrtin(([hash1(x) for x in [4,6,9,10]]))
print "================="
print FlagoletMatrtin(([hash1(x) for x in [1,4,7,9]]))
print FlagoletMatrtin(([hash1(x) for x in [4,6,9,10]]))
print FlagoletMatrtin(([hash1(x) for x in [1,6,7,10]]))
print FlagoletMatrtin(([hash1(x) for x in [4,5,6,10]]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting the data
Step2: We will reduce the dimensionality of the data as it is pulled in, removing an empty time dimension.
Step3: To properly interpolate to isentropic coordinates, the function must know the desired output isentropic levels.
Step4: Conversion to Isentropic Coordinates
Step5: The output is an xarray Dataset
Step6: Note that the units on our wind variables are not ideal for plotting. Instead, let us
Step7: Converting to Relative Humidity
Step8: Plotting the Isentropic Analysis
Step9: Montgomery Streamfunction
|
<ASSISTANT_TASK:>
Python Code:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, add_timestamp
from metpy.units import units
data = xr.open_dataset(get_test_data('narr_example.nc', False))
print(list(data.variables))
data = data.squeeze().set_coords(['lon', 'lat'])
isentlevs = [296.] * units.kelvin
isent_data = mpcalc.isentropic_interpolation_as_dataset(
isentlevs,
data['Temperature'],
data['u_wind'],
data['v_wind'],
data['Specific_humidity'],
data['Geopotential_height']
)
isent_data
isent_data['u_wind'] = isent_data['u_wind'].metpy.convert_units('kt')
isent_data['v_wind'] = isent_data['v_wind'].metpy.convert_units('kt')
isent_data['Relative_humidity'] = mpcalc.relative_humidity_from_specific_humidity(
isent_data['pressure'],
isent_data['temperature'],
isent_data['Specific_humidity']
).metpy.convert_units('percent')
# Set up our projection and coordinates
crs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0)
lon = isent_data['pressure'].metpy.longitude
lat = isent_data['pressure'].metpy.latitude
# Coordinates to limit map area
bounds = [(-122., -75., 25., 50.)]
# Choose a level to plot, in this case 296 K (our sole level in this example)
level = 0
fig = plt.figure(figsize=(17., 12.))
add_metpy_logo(fig, 120, 245, size='large')
ax = fig.add_subplot(1, 1, 1, projection=crs)
ax.set_extent(*bounds, crs=ccrs.PlateCarree())
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75)
ax.add_feature(cfeature.STATES, linewidth=0.5)
# Plot the surface
clevisent = np.arange(0, 1000, 25)
cs = ax.contour(lon, lat, isent_data['pressure'].isel(isentropic_level=level),
clevisent, colors='k', linewidths=1.0, linestyles='solid',
transform=ccrs.PlateCarree())
cs.clabel(fontsize=10, inline=1, inline_spacing=7, fmt='%i', rightside_up=True,
use_clabeltext=True)
# Plot RH
cf = ax.contourf(lon, lat, isent_data['Relative_humidity'].isel(isentropic_level=level),
range(10, 106, 5), cmap=plt.cm.gist_earth_r, transform=ccrs.PlateCarree())
cb = fig.colorbar(cf, orientation='horizontal', aspect=65, shrink=0.5, pad=0.05,
extendrect='True')
cb.set_label('Relative Humidity', size='x-large')
# Plot wind barbs
ax.barbs(lon.values, lat.values, isent_data['u_wind'].isel(isentropic_level=level).values,
isent_data['v_wind'].isel(isentropic_level=level).values, length=6,
regrid_shape=20, transform=ccrs.PlateCarree())
# Make some titles
ax.set_title(f'{isentlevs[level]:~.0f} Isentropic Pressure (hPa), Wind (kt), '
'Relative Humidity (percent)', loc='left')
add_timestamp(ax, isent_data['time'].values.astype('datetime64[ms]').astype('O'),
y=0.02, high_contrast=True)
fig.tight_layout()
# Calculate Montgomery Streamfunction and scale by 10^-2 for plotting
msf = mpcalc.montgomery_streamfunction(
isent_data['Geopotential_height'],
isent_data['temperature']
).values / 100.
# Choose a level to plot, in this case 296 K
level = 0
fig = plt.figure(figsize=(17., 12.))
add_metpy_logo(fig, 120, 250, size='large')
ax = plt.subplot(111, projection=crs)
ax.set_extent(*bounds, crs=ccrs.PlateCarree())
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75)
ax.add_feature(cfeature.STATES.with_scale('50m'), linewidth=0.5)
# Plot the surface
clevmsf = np.arange(0, 4000, 5)
cs = ax.contour(lon, lat, msf[level, :, :], clevmsf,
colors='k', linewidths=1.0, linestyles='solid', transform=ccrs.PlateCarree())
cs.clabel(fontsize=10, inline=1, inline_spacing=7, fmt='%i', rightside_up=True,
use_clabeltext=True)
# Plot RH
cf = ax.contourf(lon, lat, isent_data['Relative_humidity'].isel(isentropic_level=level),
range(10, 106, 5), cmap=plt.cm.gist_earth_r, transform=ccrs.PlateCarree())
cb = fig.colorbar(cf, orientation='horizontal', aspect=65, shrink=0.5, pad=0.05,
extendrect='True')
cb.set_label('Relative Humidity', size='x-large')
# Plot wind barbs
ax.barbs(lon.values, lat.values, isent_data['u_wind'].isel(isentropic_level=level).values,
isent_data['v_wind'].isel(isentropic_level=level).values, length=6,
regrid_shape=20, transform=ccrs.PlateCarree())
# Make some titles
ax.set_title(f'{isentlevs[level]:~.0f} Montgomery Streamfunction '
r'($10^{-2} m^2 s^{-2}$), Wind (kt), Relative Humidity (percent)', loc='left')
add_timestamp(ax, isent_data['time'].values.astype('datetime64[ms]').astype('O'),
y=0.02, pretext='Valid: ', high_contrast=True)
fig.tight_layout()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Classification with linear discriminant analysis
Step2: Look at performance over time
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Martin Billinger <martin.billinger@tugraz.at>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit, cross_val_score
from mne import Epochs, pick_types, events_from_annotations
from mne.channels import make_standard_montage
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
from mne.decoding import CSP
print(__doc__)
# #############################################################################
# # Set parameters and read data
# avoid classification of evoked responses by using epochs that start 1s after
# cue onset.
tmin, tmax = -1., 4.
event_id = dict(hands=2, feet=3)
subject = 1
runs = [6, 10, 14] # motor imagery: hands vs feet
raw_fnames = eegbci.load_data(subject, runs)
raw = concatenate_raws([read_raw_edf(f, preload=True) for f in raw_fnames])
eegbci.standardize(raw) # set channel names
montage = make_standard_montage('standard_1005')
raw.set_montage(montage)
# strip channel names of "." characters
raw.rename_channels(lambda x: x.strip('.'))
# Apply band-pass filter
raw.filter(7., 30., fir_design='firwin', skip_by_annotation='edge')
events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3))
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=None, preload=True)
epochs_train = epochs.copy().crop(tmin=1., tmax=2.)
labels = epochs.events[:, -1] - 2
# Define a monte-carlo cross-validation generator (reduce variance):
scores = []
epochs_data = epochs.get_data()
epochs_data_train = epochs_train.get_data()
cv = ShuffleSplit(10, test_size=0.2, random_state=42)
cv_split = cv.split(epochs_data_train)
# Assemble a classifier
lda = LinearDiscriminantAnalysis()
csp = CSP(n_components=4, reg=None, log=True, norm_trace=False)
# Use scikit-learn Pipeline with cross_val_score function
clf = Pipeline([('CSP', csp), ('LDA', lda)])
scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=1)
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
# plot CSP patterns estimated on full data for visualization
csp.fit_transform(epochs_data, labels)
csp.plot_patterns(epochs.info, ch_type='eeg', units='Patterns (AU)', size=1.5)
sfreq = raw.info['sfreq']
w_length = int(sfreq * 0.5) # running classifier: window length
w_step = int(sfreq * 0.1) # running classifier: window step size
w_start = np.arange(0, epochs_data.shape[2] - w_length, w_step)
scores_windows = []
for train_idx, test_idx in cv_split:
y_train, y_test = labels[train_idx], labels[test_idx]
X_train = csp.fit_transform(epochs_data_train[train_idx], y_train)
X_test = csp.transform(epochs_data_train[test_idx])
# fit classifier
lda.fit(X_train, y_train)
# running classifier: test classifier on sliding window
score_this_window = []
for n in w_start:
X_test = csp.transform(epochs_data[test_idx][:, :, n:(n + w_length)])
score_this_window.append(lda.score(X_test, y_test))
scores_windows.append(score_this_window)
# Plot scores over time
w_times = (w_start + w_length / 2.) / sfreq + epochs.tmin
plt.figure()
plt.plot(w_times, np.mean(scores_windows, 0), label='Score')
plt.axvline(0, linestyle='--', color='k', label='Onset')
plt.axhline(0.5, linestyle='-', color='k', label='Chance')
plt.xlabel('time (s)')
plt.ylabel('classification accuracy')
plt.title('Classification score over time')
plt.legend(loc='lower right')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The DCT Domain
Step2: Back to the Pixel Domain
Step3: Subsampling Time
Step4: Comparing with spatial domain subsampling
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import sys
import y4m
import matplotlib.pyplot as plt
import numpy as np
def decode_y4m_buffer(frame):
    """Decode one Y4M frame buffer into planar (Y, Cb, Cr) numpy arrays.

    Assumes 4:2:0 chroma subsampling (chroma planes are W/2 x H/2) --
    TODO confirm against the stream's 'C' colourspace header for other
    samplings.  10-bit streams ('...p10' tag) are read as uint16.
    """
    W, H = frame.headers['W'], frame.headers['H']
    Wdiv2, Hdiv2 = W // 2, H // 2
    C, buf = frame.headers['C'], frame.buffer
    # A     = byte offset of the Cb plane (size of the Y plane in bytes)
    # Adiv2 = element count of one chroma plane; div2 = chroma plane shape
    A, Adiv2, div2 = W * H, Hdiv2 * Wdiv2, (Hdiv2, Wdiv2)
    dtype, scale = 'uint8', 1.  # NOTE: ``scale`` is assigned but never used here
    if C.endswith('p10'):
        # 10-bit samples occupy two bytes each, so the Y plane doubles in size.
        # NOTE(review): the Cr offset below stays A + Adiv2; for p10 the chroma
        # plane byte size presumably doubles too -- confirm against real streams.
        dtype, scale, A = 'uint16', 4., A * 2
    # Wrap (do not copy) the raw frame buffer as three planar ndarray views.
    Y = (np.ndarray((H, W), dtype, buf))
    Cb = (np.ndarray(div2, dtype, buf, A))
    Cr = (np.ndarray(div2, dtype, buf, A + Adiv2))
    return Y, Cb, Cr
def process():
    """No-op placeholder passed to the y4m parser; always yields None."""
    return None
def y4mread(file):
    """Read the first frame of a Y4M file and return its (Y, Cb, Cr) planes.

    NOTE(review): drives ``y4m.Reader`` through its private attributes
    (_data, _stream_headers, _decode_stream_headers, _decode_frame), which
    is fragile against library updates -- confirm with the installed y4m
    package version.
    """
    parser = y4m.Reader(process(), verbose=True)  # process() returns None: no frame callback
    frame = None
    with open(file, 'rb') as f:
        while True:
            data = f.read(2048)
            if not data:
                break  # EOF reached before a full frame could be decoded
            parser._data += data
            if parser._stream_headers is None:
                parser._decode_stream_headers()
            if frame is None:
                frame = parser._decode_frame()
            else :
                break  # stop as soon as one frame has been decoded
    Y, Cb, Cr = decode_y4m_buffer(frame)
    return Y, Cb, Cr
Y, Cb, Cr = y4mread("images/owl.y4m")
plt.figure(figsize=(15,10))
y_height, y_width = Y.shape
cb_height, cb_width = Cb.shape
cr_height, cr_width = Cr.shape
plt.subplot(1,3,1)
plt.title("Luma (%dx%d)" % (y_width, y_height))
plt.imshow(Y, cmap = plt.get_cmap('gray'), vmin = 0, vmax = 255, aspect='equal', interpolation='nearest');
plt.subplot(1,3,2)
plt.title("Cb (%dx%d)" % (cb_width, cb_height))
plt.imshow(Cb, cmap = plt.get_cmap('gray'), vmin = 0, vmax = 255, aspect='equal', interpolation='nearest');
plt.subplot(1,3,3)
plt.title("Cr (%dx%d)" % (cr_width, cr_height))
plt.imshow(Cr, cmap = plt.get_cmap('gray'), vmin = 0, vmax = 255, aspect='equal', interpolation='nearest');
from scipy.fftpack import dct
block_size = 8
Y_dct = np.zeros((y_height, y_width))
for y in range(0,y_height - (block_size-1), block_size):
yRange = np.arange(y,y+block_size)
for x in range(0, y_width - (block_size-1), block_size):
xRange = np.arange(x,x+block_size)
Y_dct[np.ix_(yRange,xRange)] = dct(dct(Y[np.ix_(yRange,xRange)].T, norm='ortho').T, norm='ortho')
plt.imshow(Y_dct);
from scipy.fftpack import idct
block_size = 8
Y_idct = np.zeros((y_height, y_width))
# Fixed loop bounds: the forward DCT above iterates up to
# ``y_height - (block_size-1)``; the original inverse pass used
# ``- block_size`` and therefore skipped the last 8x8 block row/column
# whenever the image dimension is an exact multiple of block_size.
for y in range(0, y_height - (block_size-1), block_size):
    yRange = np.arange(y, y + block_size)
    for x in range(0, y_width - (block_size-1), block_size):
        xRange = np.arange(x, x + block_size)
        # 2-D inverse DCT = 1-D inverse DCT applied along both axes.
        Y_idct[np.ix_(yRange, xRange)] = idct(idct(Y_dct[np.ix_(yRange, xRange)].T, norm='ortho').T, norm='ortho')
plt.imshow(Y_idct, cmap = plt.get_cmap('gray'), vmin = 0, vmax = 255, aspect='equal', interpolation='nearest');
## DCT Subsampling
from scipy.fftpack import idct
sub_block_size = 4
y_sub_height = y_height // 2
y_sub_width = y_width // 2
Y_sub = np.zeros((y_sub_height, y_sub_width))
yy = 0
for y in range(0,y_sub_height - (sub_block_size-1), sub_block_size):
y_sub_range = range(y,y+sub_block_size)
y_range = range(yy,yy+sub_block_size)
xx = 0
for x in range(0, y_sub_width - (sub_block_size-1), sub_block_size):
x_sub_range = range(x,x+sub_block_size)
x_range = range(xx, xx+sub_block_size)
Y_sub[np.ix_(y_sub_range, x_sub_range)] = idct(idct(Y_dct[np.ix_(y_range, x_range)].T, norm='ortho').T, norm='ortho')
xx = xx + block_size
yy = yy + block_size
Y_sub_scaled = Y_sub // 2;
plt.figure(figsize=(15,10))
plt.subplot(1,2,1)
plt.title('Inverse DCT of the top left 4x4 blocks in each of the 8x8 blocks')
plt.imshow(Y_sub, cmap = plt.get_cmap('gray'), vmin = 0, vmax = 255, aspect='equal', interpolation='nearest');
plt.subplot(1,2,2)
plt.title('Same image with pixel values divided by 2')
plt.imshow(Y_sub_scaled, cmap = plt.get_cmap('gray'), vmin = 0, vmax = 255, aspect='equal', interpolation='nearest');
plt.figure(figsize=(15,10))
plt.subplot(1,2,1)
plt.title('Pixel Domain Subsampling (no filtering)')
plt.imshow(Y[::2, ::2], cmap = plt.get_cmap('gray'), vmin = 0, vmax = 255, aspect='equal', interpolation='nearest');
plt.subplot(1,2,2)
plt.title('DCT Domain Subsampling')
plt.imshow(Y_sub_scaled, cmap = plt.get_cmap('gray'), vmin = 0, vmax = 255, aspect='equal', interpolation='nearest');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step8: Extending values on vertices to a discrete gradient vector field
Step9: Let us test the above function on the simple example
Step12: Now let us implement an extension algorithm. We are leaving out the cancelling step for clarity.
Step13: Let us test the algorithm on the example from the previous step (full triangle).
|
<ASSISTANT_TASK:>
Python Code:
from itertools import combinations, chain
def simplex_closure(a):
    """Return a generator iterating over all subsimplices (of all
    dimensions) in the closure of the simplex a.  The simplex a itself
    is included.
    """
    # Restored the triple quotes around the docstring above: the original
    # bare text was a SyntaxError.
    return chain.from_iterable([combinations(a, l) for l in range(1, len(a) + 1)])
def closure(K):
    """Add all missing subsimplices to K in order to make it a simplicial
    complex.  (Docstring quoting restored; the bare text was a SyntaxError.)
    """
    return list({s for a in K for s in simplex_closure(a)})
def contained(a, b):
    """Return True if a is a subsimplex of b, False otherwise.

    (Docstring quoting restored; the bare text was a SyntaxError.)
    """
    return all((v in b for v in a))
def star(s, cx):
    """Return the set of all simplices in cx that contain simplex s.

    (Docstring quoting restored; the bare text was a SyntaxError.)
    """
    return {p for p in cx if contained(s, p)}
def intersection(s1, s2):
    """Return the intersection of s1 and s2 as a list.

    (Docstring quoting restored; the bare text was a SyntaxError.)
    """
    return list(set(s1).intersection(s2))
def link(s, cx):
    """Return the link of the simplex s in the complex cx.

    (Docstring quoting restored; the bare text was a SyntaxError.)
    """
    # Link consists of all simplices from the closed star that have
    # empty intersection with s.
    return [c for c in closure(star(s, cx)) if not intersection(s, c)]
def simplex_value(s, f, aggregate):
    """Return the value of f on the vertices of s, combined by the
    ``aggregate`` function (e.g. min or max).

    (Docstring quoting restored; the bare text was a SyntaxError.)
    """
    return aggregate([f[v] for v in s])
def lower_link(s, cx, f):
    """Return the lower link of the simplex s in the complex cx.

    The dictionary f is the mapping from vertices (integers)
    to the values on vertices.

    (Docstring quoting restored; the bare text was a SyntaxError.)
    """
    sval = simplex_value(s, f, min)
    # A link simplex belongs to the lower link when all of its vertex
    # values lie strictly below the minimum value on s.  The loop variable
    # is renamed: the original comprehension shadowed the parameter ``s``.
    return [t for t in link(s, cx)
            if simplex_value(t, f, max) < sval]
K = closure([(1, 2, 3)])
f = {1: 0, 2: 1, 3: 2}
for v in (1, 2, 3):
print"{0}: {1}".format((v,), lower_link((v,), K, f))
def join(a, b):
    """Return the join of the two simplices a and b (sorted vertex union).

    (Docstring quoting restored; the bare text was a SyntaxError.)
    """
    return tuple(sorted(set(a).union(b)))
def extend(K, f):
    """Extend the vertex values f to a discrete gradient vector field on K.

    K is a simplicial complex (iterable of simplices as tuples of vertex
    ids) and f maps each vertex id to its scalar value.

    Returns the pair (V, C), where V is the dictionary containing the
    discrete gradient vector field (simplex -> paired cofacet) and C is
    the list of all critical cells.

    (Docstring quoting restored; the bare text was a SyntaxError.)
    """
    V = dict()
    C = []
    for v in (s for s in K if len(s) == 1):
        ll = lower_link(v, K, f)
        if len(ll) == 0:  # fixed typo: original read ``len(ll) b== 0``
            # Empty lower link: v is a local minimum, hence critical.
            C.append(v)
        else:
            # Recursively extend on the lower link, then pair v with the
            # minimal-valued critical vertex found there.
            V1, C1 = extend(ll, f)
            mv, mc = min([(f[c[0]], c) for c in C1 if len(c) == 1])
            V[v] = join(v, mc)
            # Remaining critical cells of the lower link become critical
            # cells of one dimension higher (coned with v).
            for c in (c for c in C1 if c != mc):
                C.append(join(v, c))
            # Lift the lower-link field by coning each pair with v.
            for a, b in V1.items():
                V[join(a, v)] = join(b, v)
    return V, C
K = closure([(1, 2, 3)])
f = {1: 0, 2: 1, 3: 2}
extend(K, f)
K = closure([(1, 2, 3), (2, 3, 4)])
f = {1: 0, 2: 1, 3: 2, 4: 0}
extend(K, f)
K = closure([(1, 2, 3), (2, 3, 4)])
f = {1: 0, 2: 1, 3: 2, 4: 3}
extend(K, f)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'niwa', 'sandbox-1', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
# Fix: this cell previously duplicated 'prescribed_fields_mmr' (already set
# in the cell above). Per the CMIP6 aerosol template, the second
# concentrations field cell documents AOD plus CCN numbers.
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_aod_plus_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Aod Plus Ccn
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 13.3. External Mixture
Step59: 14. Optical Radiative Properties --> Radiative Scheme
Step60: 14.2. Shortwave Bands
Step61: 14.3. Longwave Bands
Step62: 15. Optical Radiative Properties --> Cloud Interactions
Step63: 15.2. Twomey
Step64: 15.3. Twomey Minimum Ccn
Step65: 15.4. Drizzle
Step66: 15.5. Cloud Lifetime
Step67: 15.6. Longwave Bands
Step68: 16. Model
Step69: 16.2. Processes
Step70: 16.3. Coupling
Step71: 16.4. Gas Phase Precursors
Step72: 16.5. Scheme Type
Step73: 16.6. Bulk Scheme Species
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'noaa-gfdl', 'gfdl-esm2m', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_aod_plus_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.external_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now run the spec in a terminal using
Step2: Defining endpoints in OAS3
Step3: Solution on the unimplemented method
Step4: Exercise
|
<ASSISTANT_TASK:>
Python Code:
# At first ensure connexion is installed
# together with the swagger module used to render the OAS3 spec
# in the web-ui
!pip install connexion[swagger-ui] connexion
# A request on a generic PATH on the server returns a
# nicely formatted and explicative error.
# Remember that we haven't already defined an operation.
!curl http://0.0.0.0:5000 -kv
render_markdown(f'''
Open the [documentation URL]({api_server_url('ui')}) and check the outcome!
Play a bit with Swagger UI.''')
# Exercise: what's the expected output of the following command?
!curl http://0.0.0.0:5000/datetime/v1/status
# Exercise: what happens if you GET an unexisting path?
!curl http://0.0.0.0:5000/datetime/v1/MISSING
print(show_component('https://teamdigitale.github.io/openapi/0.0.5/definitions.yaml#/schemas/Problem'))
# Exercise: use the yaml and requests libraries
# to download the Problem schema
from requests import get
ret = get('https://teamdigitale.github.io/openapi/0.0.5/definitions.yaml')
# Yaml parse the definitions
definitions = yaml.safe_load(ret.content)
# Nicely print the Problem schema
print(yaml.dump(definitions['schemas']['Problem']))
### Exercise
# Read the definitions above
# - https://teamdigitale.github.io/openapi/0.0.5/definitions.yaml
#
# Then use this cell to list all the structures present in definitions
for sections, v in definitions.items():
    # Only the keys of each section are needed, so iterate the mapping
    # directly instead of unpacking unused values via .items().
    for items in v:
        print(f'{sections}.{items}')
## Exercise
#Test the new setup
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To demonstrate the use of penalized Cox models we are going to use the breast cancer data, which contains the expression levels of 76 genes, age, estrogen receptor status (er), tumor size and grade for 198 individuals. The objective is to predict the time to distant metastasis.
Step2: Let us begin by fitting a penalized Cox model to various values of $\alpha$ using sksurv.linear_model.CoxPHSurvivalAnalysis and recording the coefficients we obtained for each $\alpha$.
Step3: Now, we can inspect how the coefficients change for varying $\alpha$.
Step4: We can see that if the penalty has a large weight (to the right), all coefficients are shrunk almost to zero. As the penalty's weight is decreased, the coefficients' value increases. We can also observe that the paths for X203391_at and tumor grade quickly separate themselves from the remaining coefficients, which indicates that this particular gene expression level and tumor grade are important predictive factors for time to distant metastasis.
Step5: The figure shows that the LASSO penalty indeed selects a small subset of features for large $\alpha$ (to the right) with only two features (purple and yellow line) being non-zero. As $\alpha$ decreases, more and more features become active and are assigned a non-zero coefficient until the entire set of features is used (to the left). Similar to the plot above for the ridge penalty, the path for X203391_at stands out, indicating its importance in breast cancer. However, the overall most important factor seems to be a positive estrogen receptor status (er).
Step6: Choosing penalty strength $\alpha$
Step7: Using the estimated set of alphas, we perform 5 fold cross-validation to estimate the performance – in terms of concordance index – for each $\alpha$.
Step8: We can visualize the results by plotting the mean concordance index and its standard deviation across all folds for each $\alpha$.
Step9: The figure shows that there is a range for $\alpha$ to the right where it is too large and sets all coefficients to zero, as indicated by the 0.5 concordance index of a purely random model. On the other extreme, if $\alpha$ becomes too small, too many features enter the model and the performance approaches that of a random model again. The sweet spot (orange line) is somewhere in the middle. Let's inspect that model.
Step10: The model selected a total of 21 features, and it deemed X204540_at to be the most important one, followed by X203391_at and positive estrogen receptor status
Step11: For instance, we can now select a patient and determine how positive or negative estrogen receptor status would affect the survival function.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from sksurv.datasets import load_breast_cancer
from sksurv.linear_model import CoxPHSurvivalAnalysis, CoxnetSurvivalAnalysis
from sksurv.preprocessing import OneHotEncoder
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
X, y = load_breast_cancer()
Xt = OneHotEncoder().fit_transform(X)
Xt.round(2).head()
alphas = 10. ** np.linspace(-4, 4, 50)
coefficients = {}
cph = CoxPHSurvivalAnalysis()
for alpha in alphas:
cph.set_params(alpha=alpha)
cph.fit(Xt, y)
key = round(alpha, 5)
coefficients[key] = cph.coef_
coefficients = (pd.DataFrame
.from_dict(coefficients)
.rename_axis(index="feature", columns="alpha")
.set_index(Xt.columns))
def plot_coefficients(coefs, n_highlight):
    """Plot every feature's coefficient path over the alpha grid (log x-axis)
    and label the n_highlight largest-magnitude coefficients at the smallest
    alpha with their feature names."""
    _, axis = plt.subplots(figsize=(9, 6))
    n_features = coefs.shape[0]
    alphas = coefs.columns
    # one line per feature, indexed by the DataFrame row label
    for record in coefs.itertuples():
        axis.semilogx(alphas, record[1:], ".-", label=record.Index)

    # annotate the strongest coefficients at the least-penalized end
    alpha_min = alphas.min()
    top_coefs = coefs.loc[:, alpha_min].map(abs).sort_values().tail(n_highlight)
    for name in top_coefs.index:
        coef = coefs.loc[name, alpha_min]
        plt.text(
            alpha_min, coef, name + "   ",
            horizontalalignment="right",
            verticalalignment="center"
        )

    # put the value axis on the right so labels on the left stay readable
    axis.yaxis.set_label_position("right")
    axis.yaxis.tick_right()
    axis.grid(True)
    axis.set_xlabel("alpha")
    axis.set_ylabel("coefficient")
plot_coefficients(coefficients, n_highlight=5)
cox_lasso = CoxnetSurvivalAnalysis(l1_ratio=1.0, alpha_min_ratio=0.01)
cox_lasso.fit(Xt, y)
coefficients_lasso = pd.DataFrame(
cox_lasso.coef_,
index=Xt.columns,
columns=np.round(cox_lasso.alphas_, 5)
)
plot_coefficients(coefficients_lasso, n_highlight=5)
cox_elastic_net = CoxnetSurvivalAnalysis(l1_ratio=0.9, alpha_min_ratio=0.01)
cox_elastic_net.fit(Xt, y)
coefficients_elastic_net = pd.DataFrame(
cox_elastic_net.coef_,
index=Xt.columns,
columns=np.round(cox_elastic_net.alphas_, 5)
)
plot_coefficients(coefficients_elastic_net, n_highlight=5)
# Silence convergence warnings: some alphas on the path may fail to
# converge within the reduced max_iter used for this preliminary fit.
import warnings
from sklearn.exceptions import FitFailedWarning
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
# Preliminary fit: run the elastic-net Cox model once only to obtain the
# automatically estimated grid of alpha values (alphas_).
coxnet_pipe = make_pipeline(
    StandardScaler(),
    CoxnetSurvivalAnalysis(l1_ratio=0.9, alpha_min_ratio=0.01, max_iter=100)
)
warnings.simplefilter("ignore", UserWarning)
warnings.simplefilter("ignore", FitFailedWarning)
coxnet_pipe.fit(Xt, y)
# Reuse the estimated alpha path as the search grid; each grid entry is a
# single-element alpha list so every candidate model is fit at one alpha.
estimated_alphas = coxnet_pipe.named_steps["coxnetsurvivalanalysis"].alphas_
cv = KFold(n_splits=5, shuffle=True, random_state=0)
gcv = GridSearchCV(
    make_pipeline(StandardScaler(), CoxnetSurvivalAnalysis(l1_ratio=0.9)),
    param_grid={"coxnetsurvivalanalysis__alphas": [[v] for v in estimated_alphas]},
    cv=cv,
    # failed fits score as a purely random model (concordance index 0.5)
    error_score=0.5,
    n_jobs=1).fit(Xt, y)
cv_results = pd.DataFrame(gcv.cv_results_)
alphas = cv_results.param_coxnetsurvivalanalysis__alphas.map(lambda x: x[0])
mean = cv_results.mean_test_score
std = cv_results.std_test_score
fig, ax = plt.subplots(figsize=(9, 6))
ax.plot(alphas, mean)
ax.fill_between(alphas, mean - std, mean + std, alpha=.15)
ax.set_xscale("log")
ax.set_ylabel("concordance index")
ax.set_xlabel("alpha")
ax.axvline(gcv.best_params_["coxnetsurvivalanalysis__alphas"][0], c="C1")
ax.axhline(0.5, color="grey", linestyle="--")
ax.grid(True)
best_model = gcv.best_estimator_.named_steps["coxnetsurvivalanalysis"]
best_coefs = pd.DataFrame(
best_model.coef_,
index=Xt.columns,
columns=["coefficient"]
)
non_zero = np.sum(best_coefs.iloc[:, 0] != 0)
print("Number of non-zero coefficients: {}".format(non_zero))
non_zero_coefs = best_coefs.query("coefficient != 0")
coef_order = non_zero_coefs.abs().sort_values("coefficient").index
_, ax = plt.subplots(figsize=(6, 8))
non_zero_coefs.loc[coef_order].plot.barh(ax=ax, legend=False)
ax.set_xlabel("coefficient")
ax.grid(True)
coxnet_pred = make_pipeline(
StandardScaler(),
CoxnetSurvivalAnalysis(l1_ratio=0.9, fit_baseline_model=True)
)
coxnet_pred.set_params(**gcv.best_params_)
coxnet_pred.fit(Xt, y)
surv_fns = coxnet_pred.predict_survival_function(Xt)
time_points = np.quantile(y["t.tdm"], np.linspace(0, 0.6, 100))
legend_handles = []
legend_labels = []
_, ax = plt.subplots(figsize=(9, 6))
for fn, label in zip(surv_fns, Xt.loc[:, "er=positive"].astype(int)):
line, = ax.step(time_points, fn(time_points), where="post",
color="C{:d}".format(label), alpha=0.5)
if len(legend_handles) <= label:
name = "positive" if label == 1 else "negative"
legend_labels.append(name)
legend_handles.append(line)
ax.legend(legend_handles, legend_labels)
ax.set_xlabel("time")
ax.set_ylabel("Survival probability")
ax.grid(True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In order to activate the interactive visualisation of the histogram that is later created we can use the JSROOT magic
Step2: Next we have to open the data that we want to analyze. As described above the data is stored in a *.root file.
Step3: After the data is opened we create a canvas on which we can draw a histogram. If we do not have a canvas we cannot see our histogram at the end. Its name is Canvas and its header is c. The two following arguments define the width and the height of the canvas.
Step4: The next step is to define a tree named t to get the data out of the .root file.
Step5: Now we define a histogram that will later be placed on this canvas. Its name is variable, the header of the histogram is Mass of the Z boson, the x axis is named mass [GeV] and the y axis is named events. The three following arguments indicate that this histogram contains 30 bins which have a range from 40 to 140.
Step6: Time to fill our above defined histogram. At first we define some variables and then we loop over the data. We also make some cuts as you can see in the # comments.
Step7: After filling the histogram we want to see the results of the analysis. First we draw the histogram on the canvas and then the canvas on which the histogram lies.
|
<ASSISTANT_TASK:>
Python Code:
import ROOT
%jsroot on
f = ROOT.TFile.Open("mc_105986.ZZ.root")
#f = ROOT.TFile.Open("mc_147770.Zee.root")
#f = ROOT.TFile.Open("http://opendata.atlas.cern/release/samples/MC/mc_147770.Zee.root")
canvas = ROOT.TCanvas("Canvas","c",800,600)
tree = f.Get("mini")
hist = ROOT.TH1F("variable","Mass of the Z boson; mass [GeV]; events",30,40,140)
leadLepton = ROOT.TLorentzVector()
trailLepton = ROOT.TLorentzVector()
for event in tree:
# Cut #1: At least 2 leptons
if tree.lep_n == 2:
# Cut #2: Leptons with opposite charge
if (tree.lep_charge[0] != tree.lep_charge[1]):
# Cut #3: Leptons of the same family (2 electrons or 2 muons)
if (tree.lep_type[0] == tree.lep_type[1]):
# Let's define one TLorentz vector for each, e.i. two vectors!
leadLepton.SetPtEtaPhiE(tree.lep_pt[0]/1000., tree.lep_eta[0], tree.lep_phi[0], tree.lep_E[0]/1000.)
trailLepton.SetPtEtaPhiE(tree.lep_pt[1]/1000., tree.lep_eta[1], tree.lep_phi[1], tree.lep_E[1]/1000.)
# Next line: addition of two TLorentz vectors above --> ask mass very easy (devide by 1000 to get value in GeV)
invmass = leadLepton + trailLepton
hist.Fill(invmass.M())
hist.Draw()
canvas.Draw()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: gpu info
Step2: Understand the input
Step3: Kernel Info from the single stream
Step4: model 3 cuda streams
Step5: start kernel from beginning
Step6: set the h2d start for all the cuda streams
Step7: merge all the cuda stream trace together
Step8: start algorithm
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import warnings
import pandas as pd
import numpy as np
import os
import sys # error msg, add the modules
import operator # sorting
from math import *
import matplotlib.pyplot as plt
sys.path.append('../../')
import cuda_timeline
import read_trace
import avgblk
import cke
from model_param import *
#from df_util import *
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
gtx950 = DeviceInfo()
gtx950.sm_num = 6
gtx950.sharedmem_per_sm = 49152
gtx950.reg_per_sm = 65536
gtx950.maxthreads_per_sm = 2048
# init SM resources
SM_resList, SM_traceList = init_gpu(gtx950)
#SM_resList[0]
SM_traceList[0]
trace_s1 = 'trace_s1_5m.csv'
df_trace_s1 = read_trace.Trace2dataframe(trace_s1)
trace_s3 = 'trace_s3_5m.csv'
df_trace_s3 = read_trace.Trace2dataframe(trace_s3)
#df_trace_s1
cuda_timeline.plot_trace(df_trace_s1)
#cuda_timeline.plot_trace(df_trace_s2)
cuda_timeline.plot_trace(df_trace_s3)
# extract kernel info from trace
# warning: currently lmted to one kernel
kernel = read_trace.GetKernelInfo(df_trace_s1, gtx950)
Dump_kernel_info(kernel)
# for each stream, have a dd for each kernel
stream_kernel_list = []
stream_num = 3
for sid in range(stream_num):
#print sid
# key will be the kernel order
# value will be the kernel info
kern_dd = {}
kern_dd[0] = Copy_kernel_info(kernel)
stream_kernel_list.append(kern_dd)
Dump_kernel_info(stream_kernel_list[0][0])
df_s1_trace_timing = read_trace.Get_timing_from_trace(df_trace_s1)
df_s1 = read_trace.Reset_starting(df_s1_trace_timing)
df_s1
# find when to start the stream and update the starting pos for the trace
H2D_H2D_OVLP_TH = 3.158431
df_cke_list = cke.init_trace_list(df_s1, stream_num = stream_num, h2d_ovlp_th = H2D_H2D_OVLP_TH)
df_cke_list[0]
df_cke_list[1]
df_cke_list[2]
df_all_api = cke.init_sort_api_with_extra_cols(df_cke_list)
df_all_api
# stream_id list
stream_list = [float(x) for x in range(stream_num)]
# pick the 1st sleep api
df_all_api, r1, r1_stream = cke.pick_base_call(df_all_api)
df_all_api = SetWake(df_all_api, r1)
print('row {}, stream-id {}'.format(r1, r1_stream))
# select coming (stream_num - 1) api calls, they are potential concurrency api calls
cc_rows = FindComingCalls(df_all_api, r1, stream_num)
count = 1
active_stream_pool = []
active_stream_pool.append(r1_stream)
for r2 in cc_rows:
print('active_stream_pool : {}'.format(active_stream_pool))
#if count == 1: break
#if count == 2: break
# wake target row
df_all_api = SetWake(df_all_api, r2)
r2_stream = GetStreamID(df_all_api, r2)
print('row {}, stream-id {}'.format(r2, r2_stream))
if count == 2: break
# check whether current stream is in the active stream pool
# where I mean they are from the same stream
if r2_stream in active_stream_pool:
# finish previous call
df_all_api = cke.finish_call(df_all_api, r1)
# update the time for all the calls in this stream
df_all_api = cke.UpdateStreamTime(df_all_api)
# adjust the r2 current pos and pred_end
df_all_api = UpdateCell(df_all_api, r2, 'current_pos', GetInfo(df_all_api, r2, 'start'))
df_all_api = UpdateCell(df_all_api, r2, 'pred_end', GetInfo(df_all_api, r2, 'end'))
break
# if it is a new stream, add to the pool
active_stream_pool.append(r2_stream)
#---------------------------------------
# move the current pos to the r2 start (to do)
#---------------------------------------
#if count == 1: break
#if count == 2: break
# rangeT = cke.Get_pred_range(df_all_api)
# print rangeT
# # check whether there is conc during the rangeT
# cke.Check_ovlp(df_all_api, r1, r2)
#break
# when they are from different streams
#-----------------------
# move the current_pos to the starting of coming api r2, and update r1 status
#-----------------------
#df_all_api = cke.StartNext_byType(df_all_api, [r1, r2])
#break
df_all_api = cke.MoveCurPos(df_all_api, r2) # start r2, check concurrency,and update the timing
#if count == 1: break
#if count == 2: break
#break
#-----------------------------
# if one call is done, continue the next round
#-----------------------------
rowDone, rowDone_list = cke.CheckRowDone(df_all_api, [r1, r2])
if rowDone:
print('soem call is ended, go for next call')
print rowDone_list
for each in rowDone_list: # throw away the done stream
active_stream_pool = [x for x in active_stream_pool if x <> each]
print active_stream_pool
count += 1
#break
continue
whichType = cke.CheckType(df_all_api, r1, r2) # check whether the same api
print whichType
if whichType == None:
# run noconflict
pass
elif whichType in ['h2d', 'd2h']: # data transfer in the same direction
df_all_api = cke.Predict_transferOvlp(df_all_api, [r1, r2])
if count == 1: break
#break
else:
# concurrent kernel: todo
pass
#--------------------
# check if any api is done, and update the timing for the other apis in that stream
#--------------------
df_all_api = cke.UpdateStreamTime(df_all_api)
count += 1
#break
## after checking the commming call for ovlp
df_all_api
#
# run above
#
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Modeling and Simulation in Python
Step3: Testing make_system
Step4: Testing slope_func
Step5: Now we can run the simulation.
Step6: Plotting r
Step7: We can also see the relationship between y and r, which I derive analytically in the book.
Step8: And here's the figure from the book.
Step9: We can use interpolation to find the time when y is 47 meters.
Step10: At that point r is 55 mm, which is Rmax, as expected.
Step11: The total amount of rotation is 1253 rad.
Step12: Unrolling
Step13: And a few more parameters in the Condition object.
Step15: make_system computes rho_h, which we'll need to compute moment of inertia, and k, which we'll use to compute r.
Step16: Testing make_system
Step18: Here's how we compute I as a function of r
Step19: When r is Rmin, I is small.
Step20: As r increases, so does I.
Step22: Here's the slope function.
Step23: Testing slope_func
Step24: Now we can run the simulation.
Step25: And look at the results.
Step26: Extrating the time series
Step27: Plotting theta
Step28: Plotting omega
Step29: Plotting y
Step30: Here's the figure from the book.
Step31: Yo-yo
Step33: Here's a make_system function that computes I and k based on the system parameters.
Step34: Testing make_system
Step35: Write a slope function for this system, using these results from the book
Step36: Test your slope function with the initial conditions.
Step37: Then run the simulation.
Step38: Check the final conditions. If things have gone according to plan, the final value of y should be close to 0.
Step39: Plot the results.
Step40: theta should increase and accelerate.
Step41: y should decrease and accelerate down.
|
<ASSISTANT_TASK:>
Python Code:
# If you want the figures to appear in the notebook,
# and you want to interact with them, use
# %matplotlib notebook
# If you want the figures to appear in the notebook,
# and you don't want to interact with them, use
# %matplotlib inline
# If you want the figures to appear in separate windows, use
# %matplotlib qt5
# tempo switch from one to another, you have to select Kernel->Restart
%matplotlib inline
from modsim import *
kg = UNITS.kilogram
m = UNITS.meter
s = UNITS.second
N = UNITS.newton
condition = Condition(mass = 0.03 * kg,
fraction = 1 / 3,
k = 9810.0 * N / m,
duration = 0.3 * s,
L = 0.05 * m,
d = 0.005 * m,
v1 = 0 * m / s,
v2 = 0 * m / s,
g = 9.8 * m / s**2)
condition = Condition(mass = 0.03,
fraction = 1 / 3,
k = 9810.0,
duration = 0.3,
L = 0.05,
d = 0.005,
v1 = 0,
v2 = 0,
g = 9.8)
def make_system(condition):
    """Make a system object for the two-mass spring model.

    condition: Condition with mass, fraction, k, duration, L, d, v1, v2, g

    returns: System with init, m1, m2, k, L, ts
    """
    # unpack (modsim) presumably injects the condition's fields (L, d, v1,
    # v2, fraction, mass, duration, ...) into the local scope.
    unpack(condition)
    x1 = L - d  # initial position of the upper mass
    x2 = 0      # initial position of the lower mass
    init = State(x1=x1, x2=x2, v1=v1, v2=v2)
    # split the total mass between the upper and lower parts
    m1, m2 = fraction*mass, (1-fraction)*mass
    ts = linspace(0, duration, 1001)
    return System(init=init, m1=m1, m2=m2, k=k, L=L, ts=ts)
system = make_system(condition)
system
system.init
def slope_func(state, t, system):
    """Computes the derivatives of the state variables.

    state: State object with x1, x2, v1, v2
    t: time
    system: System object with m1, m2, k, L

    returns: sequence of derivatives (v1, v2, a1, a2)
    """
    x1, x2, v1, v2 = state
    unpack(system)  # makes m1, m2, k, L available as locals
    dx = x1 - x2
    f_spring = k * (L - dx)  # Hooke's law: force from spring compression
    # NOTE(review): g is not stored on `system`; it is presumably resolved
    # from the enclosing scope -- confirm.
    a1 = f_spring/m1 - g
    a2 = -f_spring/m2 - g
    # at the very start, keep the lower mass from accelerating downward
    # (it rests on the surface until the spring releases)
    if t < 0.003 and a2 < 0:
        a2 = 0
    return v1, v2, a1, a2
slope_func(system.init, 0, system)
run_odeint(system, slope_func)
system.results.tail()
plot(system.results.x1)
plot(system.results.x2)
plot(system.results.x1 - system.results.x2)
plot(ys, color='green', label='y')
decorate(xlabel='Time (s)',
ylabel='Length (m)')
plot(rs, color='red', label='r')
decorate(xlabel='Time (s)',
ylabel='Radius (mm)')
plot(rs, ys, color='purple')
decorate(xlabel='Radius (mm)',
ylabel='Length (m)',
legend=False)
subplot(3, 1, 1)
plot(thetas, label='theta')
decorate(ylabel='Angle (rad)')
subplot(3, 1, 2)
plot(ys, color='green', label='y')
decorate(ylabel='Length (m)')
subplot(3, 1, 3)
plot(rs, color='red', label='r')
decorate(xlabel='Time(s)',
ylabel='Radius (mm)')
savefig('chap11-fig01.pdf')
T = interp_inverse(ys, kind='cubic')
t_end = T(47)
t_end
R = interpolate(rs, kind='cubic')
R(t_end)
THETA = interpolate(thetas, kind='cubic')
THETA(t_end)
kg = UNITS.kilogram
N = UNITS.newton
condition = Condition(Rmin = 0.02 * m,
Rmax = 0.055 * m,
Mcore = 15e-3 * kg,
Mroll = 215e-3 * kg,
L = 47 * m,
tension = 2e-4 * N,
duration = 180 * s)
def make_system(condition):
    """Make a system object for the unrolling toilet-paper model.

    condition: Condition with Rmin, Rmax, Mcore, Mroll,
               L, tension, and duration

    returns: System with init, k, rho_h, Rmin, Rmax,
             Mcore, Mroll, ts
    """
    unpack(condition)  # injects the condition's fields into local scope

    init = State(theta = 0 * radian,
                 omega = 0 * radian/s,
                 y = L)

    # areal density of the paper: roll mass spread over the annulus
    area = pi * (Rmax**2 - Rmin**2)
    rho_h = Mroll / area

    # k relates remaining length y to radius: r**2 = 2*k*y + Rmin**2
    k = (Rmax**2 - Rmin**2) / 2 / L / radian

    ts = linspace(0, duration, 101)

    return System(init=init, k=k, rho_h=rho_h,
                  Rmin=Rmin, Rmax=Rmax,
                  Mcore=Mcore, Mroll=Mroll,
                  ts=ts)
system = make_system(condition)
system
system.init
def moment_of_inertia(r, system):
    """Moment of inertia for a roll of toilet paper.

    r: current radius of roll in meters
    system: System object with Mcore, rho_h, Rmin, Rmax

    returns: moment of inertia in kg m**2
    """
    unpack(system)  # makes Mcore, rho_h, Rmin available as locals
    Icore = Mcore * Rmin**2
    # annulus of paper between Rmin and the current radius r
    Iroll = pi * rho_h / 2 * (r**4 - Rmin**4)
    return Icore + Iroll
moment_of_inertia(system.Rmin, system)
moment_of_inertia(system.Rmax, system)
def slope_func(state, t, system):
    """Computes the derivatives of the state variables.

    state: State object with theta, omega, y
    t: time
    system: System object with Rmin, k (tension is also used here;
            NOTE(review): it is not stored on System by make_system and is
            presumably resolved from the enclosing scope -- confirm)

    returns: sequence of derivatives (omega, alpha, dydt)
    """
    theta, omega, y = state
    unpack(system)  # makes k, Rmin (and friends) available as locals
    # current roll radius from the remaining length y
    r = sqrt(2*k*y + Rmin**2)
    I = moment_of_inertia(r, system)
    tau = r * tension        # torque from pulling at the rim
    alpha = tau / I
    dydt = -r * omega        # paper pays out as the roll spins
    return omega, alpha, dydt
slope_func(system.init, 0*s, system)
run_odeint(system, slope_func)
system.results.tail()
thetas = system.results.theta
omegas = system.results.omega
ys = system.results.y
plot(thetas, label='theta')
decorate(xlabel='Time (s)',
ylabel='Angle (rad)')
plot(omegas, color='orange', label='omega')
decorate(xlabel='Time (s)',
ylabel='Angular velocity (rad/s)')
plot(ys, color='green', label='y')
decorate(xlabel='Time (s)',
ylabel='Length (m)')
subplot(3, 1, 1)
plot(thetas, label='theta')
decorate(ylabel='Angle (rad)')
subplot(3, 1, 2)
plot(omegas, color='orange', label='omega')
decorate(ylabel='Angular velocity (rad/s)')
subplot(3, 1, 3)
plot(ys, color='green', label='y')
decorate(xlabel='Time(s)',
ylabel='Length (m)')
savefig('chap11-fig02.pdf')
condition = Condition(Rmin = 8e-3 * m,
Rmax = 16e-3 * m,
Rout = 35e-3 * m,
mass = 50e-3 * kg,
L = 1 * m,
g = 9.8 * m / s**2,
duration = 1 * s)
def make_system(condition):
    """Make a system object for the yo-yo model.

    condition: Condition with Rmin, Rmax, Rout,
               mass, L, g, duration

    returns: System with init, k, Rmin, Rmax, mass,
             I, g, ts
    """
    unpack(condition)  # injects the condition's fields into local scope

    init = State(theta = 0 * radian,
                 omega = 0 * radian/s,
                 y = L,
                 v = 0 * m / s)

    # moment of inertia of a solid disk of radius Rout
    I = mass * Rout**2 / 2
    # k relates remaining string length y to axle radius
    k = (Rmax**2 - Rmin**2) / 2 / L / radian

    ts = linspace(0, duration, 101)

    return System(init=init, k=k,
                  Rmin=Rmin, Rmax=Rmax,
                  mass=mass, I=I, g=g,
                  ts=ts)
system = make_system(condition)
system
system.init
# Solution goes here
slope_func(system.init, 0*s, system)
run_odeint(system, slope_func)
system.results.tail()
thetas = system.results.theta
ys = system.results.y
plot(thetas, label='theta')
decorate(xlabel='Time (s)',
ylabel='Angle (rad)')
plot(ys, color='green', label='y')
decorate(xlabel='Time (s)',
ylabel='Length (m)')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unit Test
|
<ASSISTANT_TASK:>
Python Code:
%run ../linked_list/linked_list.py
%load ../linked_list/linked_list.py
class MyLinkedList(LinkedList):

    def delete_node(self, node):
        """Delete `node` from the list given only a reference to that node.

        Without access to the previous node, copy the successor's data into
        `node` and unlink the successor.  The tail has no successor, so the
        best we can do is clear its data, matching the unit tests below
        which expect [None] after deleting from a single-node list.

        NOTE(review): assumes Node exposes `data` and `next` attributes --
        confirm against the loaded linked_list module.
        """
        if node is None:
            return
        if node.next is None:
            # Tail node: cannot bypass it, so clear its payload instead.
            node.data = None
        else:
            node.data = node.next.data
            node.next = node.next.next
# %load test_delete_mid.py
from nose.tools import assert_equal
class TestDeleteNode(object):
    """Unit tests for MyLinkedList.delete_node (nose-style assertions)."""
    def test_delete_node(self):
        # Case 1: deleting a null node from an empty list is a no-op.
        print('Test: Empty list, null node to delete')
        linked_list = MyLinkedList(None)
        linked_list.delete_node(None)
        assert_equal(linked_list.get_all_data(), [])
        # Case 2: a single node cannot be unlinked, so its data is cleared.
        print('Test: One node')
        head = Node(2)
        linked_list = MyLinkedList(head)
        linked_list.delete_node(head)
        assert_equal(linked_list.get_all_data(), [None])
        # Case 3: list is [1, 4, 3, 1]; removing node2 (value 4) -> [1, 3, 1].
        print('Test: Multiple nodes')
        linked_list = MyLinkedList(None)
        node0 = linked_list.insert_to_front(1)
        node1 = linked_list.insert_to_front(3)
        node2 = linked_list.insert_to_front(4)
        node3 = linked_list.insert_to_front(1)
        linked_list.delete_node(node2)
        assert_equal(linked_list.get_all_data(), [1, 3, 1])
        print('Success: test_delete_node')
def main():
    # Script entry point: run the delete-node unit tests.
    test = TestDeleteNode()
    test.test_delete_node()
if __name__ == '__main__':
main()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 1 HMC Inference for GP Regression
Step2: Let us Make a GP Regression model and give some general prior distributions to model parameters.
Step3: Sampling the posterior distribution of model parameters
Step4: Plot the samples
Step5: Plot the posterior marginal distribution of model parameters
Step6: Plot the model parameters (lengthscale, variance and noise variance) against each other
Step7: By setting the model parameters to the posterior mean, we can visualize the model fit
Step8: Sample the posterior distribution of X given some new Y
Step9: Generate the inference model for the new observations. X_new are the MAP estimations by optimizing the log likelihood. As plotted with a red dot, the MAP estimation corresponds to only one of the modes.
Step10: Draw 10,000 samples from the inference model
Step11: Plot the samples
Step12: Plot the marginal distribution of inferred inputs. The two modes of inputs are clearly visible from the sampled posterior distribution.
Step13: Example 2 HMC for lengthscale and variance with marathon data
Step14: Run HMC
Step15: Seaborn
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import GPy
%matplotlib inline
#%config InlineBackend.figure_format = 'svg'
from pylab import *
# Let's make some synthetic data
x = np.linspace(0.,2*np.pi,100)[:,None]
y = -cos(x)+np.random.randn(*x.shape)*0.3+1
_ = plot(x,y,'.')
# Make a GP regression model
m = GPy.models.GPRegression(x,y)
# Give some general prior distributions for model parameters
m.kern.lengthscale.set_prior(GPy.priors.Gamma.from_EV(1.,10.))
m.kern.variance.set_prior(GPy.priors.Gamma.from_EV(1.,10.))
m.likelihood.variance.set_prior(GPy.priors.Gamma.from_EV(1.,10.))
_=m.plot()
hmc = GPy.inference.mcmc.HMC(m,stepsize=5e-2)
s = hmc.sample(num_samples=1000) # Burnin
s = hmc.sample(num_samples=1000)
plot(s)
labels = ['kern variance', 'kern lengthscale','noise variance']
samples = s[300:] # cut out the burn-in period
from scipy import stats
xmin = samples.min()
xmax = samples.max()
xs = np.linspace(xmin,xmax,100)
for i in xrange(samples.shape[1]):
kernel = stats.gaussian_kde(samples[:,i])
plot(xs,kernel(xs),label=labels[i])
_ = legend()
fig = figure(figsize=(14,4))
ax = fig.add_subplot(131)
_=ax.plot(samples[:,0],samples[:,1],'.')
ax.set_xlabel(labels[0]); ax.set_ylabel(labels[1])
ax = fig.add_subplot(132)
_=ax.plot(samples[:,1],samples[:,2],'.')
ax.set_xlabel(labels[1]); ax.set_ylabel(labels[2])
ax = fig.add_subplot(133)
_=ax.plot(samples[:,0],samples[:,2],'.')
ax.set_xlabel(labels[0]); ax.set_ylabel(labels[2])
# Set the model parameters as the posterior mean
m.kern.variance[:] = samples[:,0].mean()
m.kern.lengthscale[:] = samples[:,1].mean()
m.likelihood.variance[:] = samples[:,2].mean()
print m
_=m.plot()
y_new = np.array([1.5])[:,None]
x_new,mi = m.infer_newX(y_new)
print mi
m.plot()
plot(x_new,y_new,'or')
hmc_new = GPy.inference.mcmc.HMC(mi,stepsize=2e-1)
s_new = hmc_new.sample(num_samples=10000,hmc_iters=10)
_ = plot(s_new[:,:])
from scipy import stats
samples_new = s_new[:]
xmin = samples_new.min()
xmax = samples_new.max()
xs = np.linspace(xmin,xmax,100)
for i in xrange(samples_new.shape[1]):
kernel = stats.gaussian_kde(samples_new[:,i])
plot(xs,kernel(xs))
m = GPy.examples.regression.olympic_marathon_men()
#
#set prior for lengthscale and variance.
m.kern.variance.set_prior(GPy.priors.Gamma.from_EV(25.,150.))
m.kern.lengthscale.set_prior(GPy.priors.Gamma.from_EV(120.,2000.))
print m
# initialise hmc
hmc = GPy.inference.mcmc.HMC(m,stepsize=2e-1)
# run hmc
t = hmc.sample(num_samples=20000,hmc_iters=20)
# Sample parameters
#hmc = GPy.inference.optimization.HMC(m, stepsize=5e-1)
#t = hmc.sample(m_iters=50000,hmc_iters=20)
_=plot(t)
print t.mean(axis=0)
print t.std(axis=0)
_=hist(t[:,:2],50)
import seaborn as sns, pandas as pd
plt.rcParams['text.usetex'] = False
df = pd.DataFrame(t, columns=m.parameter_names_flat())
ax = sns.kdeplot(df['rbf.variance'],
color="b", shade=True, shade_lowest=False)
ax = sns.kdeplot(df['rbf.lengthscale'],
color="r", shade=True, shade_lowest=False)
sns.set(style="white", color_codes=True)
_ = sns.jointplot(data=df, x='rbf.variance', y='rbf.lengthscale', kind="hex",
marginal_kws=dict(kde=True, hist=True, kde_kws=dict(shade=False)),
stat_func=None
)
df
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Frage 1
Step2: Frage 2
Step3: Frage 3
Step4: Frage 4
Step5: Heiko Mader
Step6: Aufgabe 3
Step7: Aufgabe 4
Step8: Leichte Änderungen führen zu einem "fast richtigen" Ergebnis
|
<ASSISTANT_TASK:>
Python Code:
%sql mysql://steinam:steinam@localhost/sommer_2014
%%sql
select * from artikel
where Art_Bezeichnung like '%Schmerzmittel%' or Art_Bezeichnung like '%schmerzmittel%';
%%sql
select k.Kd_firma, sum(rp.RgPos_Menge * rp.RgPos_Preis) as Umsatz
from Kunde k left join Rechnung r
on k.Kd_Id = r.Rg_Kd_ID
inner join Rechnungsposition rp
on r.Rg_ID = rp.RgPos_RgID
group by k.`Kd_Firma`
order by Umsatz desc;
%%sql
-- Originallösung bringt das gleiche Ergebnis
select k.`Kd_Firma`,
(select sum(RgPos_menge * RgPos_Preis)
from `rechnungsposition` rp, rechnung r
where r.`Rg_ID` = `rp`.`RgPos_RgID` and r.`Rg_Kd_ID` = k.`Kd_ID`) as Umsatz
from kunde k order by Umsatz desc
%%sql
-- meine Lösung
select artikel.*, sum(RgPos_Menge) as Menge, count(RgPos_ID) as Anzahl
from artikel inner join `rechnungsposition`
where `rechnungsposition`.`RgPos_ArtID` = `artikel`.`Art_ID`
group by artikel.`Art_ID`
%%sql
-- Leitungslösung
select artikel.* ,
(select sum(RgPOS_Menge) from Rechnungsposition rp
where rp.RgPos_ArtID = artikel.Art_ID) as Menge,
(select count(RgPOS_menge) from Rechnungsposition rp
where rp.RgPos_ArtID = artikel.Art_ID) as Anzahl
from Artikel
%%sql
-- Original
select left(kunde.`Kd_PLZ`,1) as Region,
sum(`rechnungsposition`.`RgPos_Menge` * `rechnungsposition`.`RgPos_Preis`) as Summe
from kunde left join rechnung
on kunde.`Kd_ID` = rechnung.`Rg_Kd_ID`
left join rechnungsposition
on `rechnung`.`Rg_ID` = `rechnungsposition`.`RgPos_RgID`
group by Region
order by Summe;
%%sql
-- Inner join ändert nichts
select left(kunde.`Kd_PLZ`,1) as Region,
sum(`rechnungsposition`.`RgPos_Menge` * `rechnungsposition`.`RgPos_Preis`) as Summe
from kunde inner join rechnung
on kunde.`Kd_ID` = rechnung.`Rg_Kd_ID`
inner join rechnungsposition
on `rechnung`.`Rg_ID` = `rechnungsposition`.`RgPos_RgID`
group by Region
order by Summe;
%%sql
select kunde.*, umsatz from kunde
inner join (
select (RgPos_menge * RgPos_Preis) as Umsatz, kd_id
from `rechnungsposition`
inner join rechnung on `rechnungsposition`.`RgPos_ID` = `rechnung`.`Rg_ID`
inner join kunde on `rechnung`.`Rg_Kd_ID` = Kunde.`Kd_ID`
group by `Kd_ID`
) a
on Kunde.`Kd_ID` = a.Kd_ID
order by umsatz desc;
%%sql
select a.*, mengeGesamt,anzahlRechPos
from artikel a
Inner join (
select SUM(RgPos_menge) as mengeGesamt, art_id
from `rechnungsposition` inner join artikel
on `rechnungsposition`.`RgPos_ArtID` = artikel.`Art_ID`
group by art_id
) b on a.`Art_ID` = b.art_id
Inner join
(select count(*) as anzahlRechPos, art_id
from `rechnungsposition` inner join artikel
on `rechnungsposition`.`RgPos_ArtID` = artikel.`Art_ID`
group by art_id
) c on a.`Art_ID` = c.art_id
%%sql
select gebiet, umsatz from `kunde`
inner join (
select kd_plz as gebiet, kd_id from `kunde`
where kd_plz in
(0%,1%,2%,3%,4%,5%,6%,7%,8%,9%)
group by kd_id
) a on kunde.`Kd_ID` = b.kd_id
inner join (
select rgPos_Menge * rgPos_Preis as Umsatz2, kd_id
from `rechnungsposition` inner join
rechnung on `rechnungsposition`.`RgPos_RgID` = rechnung.`Rg_ID`
inner join kunde on `rechnung`.`Rg_Kd_ID` = kunde.`Kd_ID`
group by kd_id
) b on `kunde`.`Kd_ID` = b.kd_id
order by umsatz desc;
%%sql
select gebiet, umsatz from `kunde`
inner join (
select kd_plz as gebiet, kd_id from `kunde`
where left(kd_plz,1) in
(0,1,2,3,4,5,6,7,8,9)
group by kd_id
) a on kunde.`Kd_ID` = a.kd_id
inner join (
select rgPos_Menge * rgPos_Preis as Umsatz, kd_id
from `rechnungsposition` inner join
rechnung on `rechnungsposition`.`RgPos_RgID` = rechnung.`Rg_ID`
inner join kunde on `rechnung`.`Rg_Kd_ID` = kunde.`Kd_ID`
group by kd_id
) b on `kunde`.`Kd_ID` = b.kd_id
order by umsatz desc;
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 1
Step2: Part 2
Step3: Test the docker image
Step4: Test the docker image
Step5: Check convergence (visual inspection)
Step7: Test the docker image
|
<ASSISTANT_TASK:>
Python Code:
# check which python is in use.
import sys
print('Notebook is running:', sys.executable) # /opt/conda/bin/python
# or uncomment the code below
from platform import python_version # 3.7.6
print('The current Python version is', python_version())
import hddm, IPython
print('The current HDDM version is', hddm.__version__) # 0.8.0
import kabuki
print('The current kabuki version is', kabuki.__version__) # 0.6.3
import matplotlib
print('The current matplotlib version is', matplotlib.__version__) # 0.3.1.3
# Warning:`IPython.parallel` package has been deprecated since IPython 4.0.
#print('The current IPython version is', IPython.__version__)
import os
cur_dir = os.getcwd()
print('The current working directory is', cur_dir)
import ipyparallel as ipp
rc = ipp.Client()
rc.ids
def foo():
    """Block for 5 seconds (simulated remote work), then return 'foo'."""
    import time
    time.sleep(5)
    return 'foo'
def bar():
    """Block for 10 seconds (simulated remote work), then return 'bar'."""
    import time
    time.sleep(10)
    return 'bar'
res1 = rc[0].apply(foo)
res2 = rc[1].apply(bar)
results = [res1, res2]
while not all(map(lambda ar: ar.ready(), results)):
pass
print(res1.get(), res2.get())
dv = rc[:]
res = dv.map(lambda x, y, z: x + y + z, range(10), range(10), range(10))
res.get()
@dv.remote(block = True)
def f1(n):
    """Executed remotely (blocking) on the engines of `dv`; returns an
    array of `n` uniform random samples from each engine."""
    import numpy as np
    return np.random.rand(n)
%matplotlib inline
# Preparation
import os, time, csv, datetime
import kabuki, hddm
from kabuki.analyze import gelman_rubin
from datetime import date
import random
import pandas as pd
import matplotlib.pyplot as plt
# check the data
# load data
df = hddm.load_csv('df_example.csv')
df.head(10) # uncomment if you want to check the head of the data
# Uncomment the codes below if you are interested in checking the distribution of raw data
# plot and save the distribution of raw data:
fig = plt.figure()
ax = fig.add_subplot(111, xlabel='RT', ylabel='count', title='RT distributions')
for i, subj_data in df.groupby('subj_idx'):
subj_data.rt.hist(bins=20, histtype='step', ax=ax)
# define a function to run model in parallel
def run_model(id):
    """Fit one HDDM chain on a random 10-subject subset and save it to disk.

    Parameters
    ----------
    id : int
        Chain index; used to build unique database/model file names so
        chains run in parallel do not overwrite each other.

    Returns
    -------
    hddm.HDDMStimCoding
        The fitted model (also pickled under /home/jovyan/example).
    """
    print('running model%i'%id)
    # Imports live inside the function so it can run on ipyparallel engines.
    import hddm
    import random
    exp_name = 'example'
    print('running models %i'%id, 'for exp', exp_name)
    # USE the absolute directory in docker.
    dbname = '/home/jovyan/example/df_' + exp_name + '_chain_vaz_test_%i.db'%id # define the database name, which uses pickle format
    mname  = '/home/jovyan/example/df_' + exp_name + '_chain_vaz_test_%i'%id # define the name for the model
    fname  = '/home/jovyan/example/df_' + exp_name + '.csv'
    df = hddm.load_csv(fname)
    df_subj = df['subj_idx'].unique()
    # Random selection WITHOUT repetition, as intended: the previous
    # random.randint loop could draw the same subject twice, silently
    # fitting fewer than 10 distinct subjects.  random.sample guarantees
    # 10 unique subjects (seeded, so the choice is reproducible).
    random.seed(10)
    df_test_list = random.sample(list(df_subj), min(10, len(df_subj)))
    df_test = df[df['subj_idx'].isin(df_test_list)]
    m = hddm.HDDMStimCoding(df_test,
                            include='z',
                            stim_col='stim',
                            depends_on={'v':['match','val', 'id']},
                            split_param='v',
                            drift_criterion=False,
                            p_outlier=0.05)
    m.find_starting_values()
    m.sample(500, burn=100, dbname=dbname, db='pickle')
    m.save(mname)
    return m
start_time = time.time() # the start time of the processing
m = run_model(0)
print("\nRunning 1 chain used: %f seconds." % (time.time() - start_time))
# m = hddm.load("df_example_chain_vaz_test_0")
print("M_df1a_v_corr DIC: %f" % m.dic)
m.plot_posterior_predictive()
from ipyparallel import Client
v = Client()[:]
start_time = time.time() # the start time of the processing
jobs = v.map(run_model, range(4)) # 4 is the number of CPUs
models = jobs.get()
print("\nRunning 4 chains used: %f seconds." % (time.time() - start_time))
print("M_df1a_v_corr DIC: %f" % models[0].dic)
gelman_rubin(models)
# Create a new model concatenating all individual models.
import kabuki
combined_model = kabuki.utils.concat_models(models)
def plot_grp_trace(models, param):
    """Plot group-level traces and posterior densities for one parameter.

    For every fitted model (chain) in *models*, the group-node traces
    whose column name starts with *param* are stacked; for each matching
    condition a figure is drawn with the per-chain trace on the left and
    the per-chain density on the right so convergence can be inspected
    visually.

    Parameters
    ----------
    models : sequence
        Fitted HDDM models, one per chain (any number of chains; the
        previous version was hard-coded to exactly 4).
    param : str
        Column-name prefix selecting the group parameter(s) to plot,
        e.g. 'v', 'a' or 'z'.
    """
    import seaborn as sns
    import pandas as pd
    import numpy as np
    import matplotlib.pyplot as plt
    # Stack the group traces of all chains, tagging rows with the chain id.
    df_grp_traces = []
    for i, df_m in enumerate(models):
        df_grp_trace = df_m.get_group_traces()
        df_grp_trace['chain'] = i
        print('chain', i, df_grp_trace.shape)
        df_grp_traces.append(df_grp_trace)
    df_grp_traces = pd.concat(df_grp_traces)
    # select the columns included that parameter
    filter_col = [col for col in df_grp_traces if col.startswith(param)]
    filter_col.append('chain')  # add the chain column to the data
    df_tmp = df_grp_traces.loc[:, filter_col]
    df_tmp["sample_id"] = df_tmp.index
    df_melt = pd.melt(df_tmp,
                      id_vars=['sample_id', 'chain'],
                      value_vars=filter_col[:-1],
                      var_name='cond',
                      value_name='value')
    sns.set_palette("Set2")
    for indx, df_cond in df_melt.groupby('cond'):
        fig, axs = plt.subplots(ncols=2, figsize=(20, 9))
        print(indx)
        sns.lineplot(x='sample_id', y='value', hue="chain", data=df_cond,
                     ax=axs[0], palette='Set2')
        # One density per chain (generalized from four copy-pasted calls).
        for chain_id in sorted(df_cond['chain'].unique()):
            sns.distplot(df_cond.loc[df_cond['chain'] == chain_id, 'value'],
                         hist=False, kde=True,
                         kde_kws={'shade': False, 'linewidth': 3},
                         ax=axs[1], label=str(chain_id))
        axs[0].set_title(indx, fontsize=20)
        axs[1].set_title(indx, fontsize=20)
        plt.show()
plot_grp_trace(models=df1a_models, param='z')
import sys
import time
from IPython.display import clear_output
def wait_watching_stdout(ar, dt=30):
    """Block until async jobs finish, echoing engine stdout periodically.

    The original docstring lost its triple quotes during extraction,
    which made the function body a SyntaxError; restored here.

    Parameters
    ----------
    ar : ipyparallel AsyncResult
        The map/apply result of the models being run.
    dt : int or float, optional
        Number of seconds between checks of the engines' output; you can
        make it shorter or longer (default 30).
    """
    while not ar.ready():
        stdouts = ar.stdout
        if not any(stdouts):
            # Nothing printed yet; still sleep before polling again so we
            # do not busy-loop (the bare `continue` skipped the sleep).
            time.sleep(dt)
            continue
        # clear_output doesn't do much in terminal environments
        clear_output()
        print('-' * 30)
        print("%.3fs elapsed" % ar.elapsed)
        print("")
        for out in ar.stdout:
            print(out)
        sys.stdout.flush()
        time.sleep(dt)
from ipyparallel import Client
start_time = time.time() # the start time of the processing
v = Client()[:]
jobs = v.map(run_model, range(4)) # 4 is the number of CPUs
wait_watching_stdout(jobs)
models = jobs.get()
m1_time = time.time()
print("\nRunning 4 chains used: %f seconds." % (m1_time - start_time))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The data sets that we will work on are logs from the firewall; we will use the small file pdata.csv to simplify readings, and then we will use the huge file projectdata.csv, which contains logs of source IP addresses, timestamps, HTTP events, etc.
Step2: we will start by using a bar plot to visualise IP addresses vs. their hit count; however, due to the huge size we will select samples with hit number > 100
Step3: As shown in the bar chart above, a few samples have extreme values; we have to investigate them more deeply
Step4: The box plot above shows the outliers outside of the box; it also shows that the box and the variance are closer to the 1st quartile, which indicates that we should use a hit count of 700 instead of 100 in order to isolate the outliers
Step5: As shown in the bar chart above there are 455 source IP addresses with hit count over 700, and there are also a few outliers with even more than 10000 hits
Step6: the scatter plot above shows the top suspected attackers, as we can see there are
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from collections import Counter
%matplotlib inline
df = pd.read_csv('pdata.csv')
print(df.columns.values)
print('\nNumber of hits on the website is %s hit' %len(df['Source IP']))
total_hits = Counter(df['Source IP'])
# filterout IP addresses with hit count > 100 hit
hits = {k:v for k,v in total_hits.items() if(v > 100)}
print('\n Size of filtered hits is %s hits' %len(hits))
# x contains the IP addresses
sourceIPs = hits.keys()
# y contains the number of hits per that IP
num_hits = hits.values()
# indexes will facilitate in drawing as index of IP addresses
indexes = np.arange(len(hits))
plt.bar(indexes, num_hits,width=0.5)
plt.xlabel("Source IP Addresses")
plt.ylabel("Number of hits")
plt.show()
sns.boxplot(pd.Series(hits))
full_df = pd.read_csv('projectdata.csv')
total_hits = Counter(full_df['Source IP'])
# filterout IP addresses with hit count > 700 hit
hits = {k:v for k,v in total_hits.items() if(v > 700)}
print('\n Size of filtered hits is %s hits' %len(hits))
# x contains the IP addresses
#sourceIPs = hits.keys()
# y contains the number of hits per that IP
num_hits = hits.values()
# indexes will facilitate in drawing as index of IP addresses
indexes = np.arange(len(hits))
plt.bar(indexes, num_hits,width=0.5)
plt.xlabel("Source IP Addresses")
plt.ylabel("Number of hits")
plt.show()
plt.scatter(indexes,pd.Series(hits))
num_of_IPs = len(hits)
suspects = {k:v for k,v in hits.items() if(v > 10000)}
print('There are %s attacker out of %s suspected IPs\n' %(len(suspects), len(hits)))
suspects
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating a sparse matrix
Step2: From coordinates
Step3: Convert back to dense matrix
Step4: Compressed Sparse Row and Column formats
Step5: Casting from COO format
Step6: COO summation convention
Step7: Application
Step8: Creating an $n$ by $n$ confusion matrix
Step9: Application
Step10: Construct the sparse adjacency matrix
Step11: Construct the transition matrix
Step12: Modify the transition matrix with a damping factor
Step13: Visualize the airport connections graph and label the top and bottom 5 airports by pagerank
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
from scipy import sparse
import scipy.sparse.linalg as spla
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('notebook', font_scale=1.5)
A = np.random.poisson(0.2, (5,15)) * np.random.randint(0, 10, (5, 15))
A
rows, cols = np.nonzero(A)
vals = A[rows, cols]
vals
rows
cols
X1 = sparse.coo_matrix(A)
X1
print(X1)
X2 = sparse.coo_matrix((vals, (rows, cols)))
X2
print(X2)
X2.todense()
np.vstack([rows, cols])
indptr = np.r_[np.searchsorted(rows, np.unique(rows)), len(rows)]
indptr
X3 = sparse.csr_matrix((vals, cols, indptr))
X3
X3.todense()
X4 = X2.tocsr()
X4
rows = np.r_[np.zeros(4), np.ones(4)]
cols = np.repeat([0,1], 4)
vals = np.arange(8)
rows
cols
vals
X5 = sparse.csr_matrix((vals, (rows, cols)))
print(X5)
obs = np.random.randint(0, 2, 100)
pred = np.random.randint(0, 2, 100)
vals = np.ones(100).astype('int')
pred
vals.shape, obs.shape , pred.shape
X6 = sparse.coo_matrix((vals, (pred, obs)))
X6.todense()
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
iris = datasets.load_iris()
knn = KNeighborsClassifier()
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target,
test_size=0.5, random_state=42)
pred = knn.fit(X_train, y_train).predict(X_test)
X7 = sparse.coo_matrix((np.ones(len(pred)).astype('int'), (pred, y_test)))
pd.DataFrame(X7.todense(), index=iris.target_names, columns=iris.target_names)
data = pd.read_csv('data/airports.csv', usecols=[0,1])
data.shape
data.head()
lookup = pd.read_csv('data/names.csv', index_col=0)
lookup.shape
lookup.head()
import networkx as nx
g = nx.from_pandas_dataframe(data, source='ORIGIN_AIRPORT_ID', target='DEST_AIRPORT_ID')
airports = np.array(g.nodes())
adj_matrix = nx.to_scipy_sparse_matrix(g)
out_degrees = np.ravel(adj_matrix.sum(axis=1))
diag_matrix = sparse.diags(1 / out_degrees).tocsr()
M = (diag_matrix @ adj_matrix).T
n = len(airports)
d = 0.85
I = sparse.eye(n, format='csc')
A = I - d * M
b = (1-d) / n * np.ones(n) # so the sum of all page ranks is 1
A.todense()
from scipy.sparse.linalg import spsolve
r = spsolve(A, b)
r.sum()
idx = np.argsort(r)
top10 = idx[-10:][::-1]
bot10 = idx[:10]
df = lookup.loc[airports[top10]]
df['degree'] = out_degrees[top10]
df['pagerank']= r[top10]
df
df = lookup.loc[airports[bot10]]
df['degree'] = out_degrees[bot10]
df['pagerank']= r[bot10]
df
import warnings
labels = {airports[i]: lookup.loc[airports[i]].str.split(':').str[0].values[0]
for i in np.r_[top10[:5], bot10[:5]]}
with warnings.catch_warnings():
warnings.simplefilter('ignore')
nx.draw(g, pos=nx.spring_layout(g), labels=labels,
node_color='blue', font_color='red', alpha=0.5,
node_size=np.clip(5000*r, 1, 5000*r), width=0.1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 分析数据
Step2: 练习
Step3: 问题 1
Step4: 问题 2
Step5: 问题 3
Step6: 观察
Step7: 练习
Step8: 问题 4
Step9: 问题 5
Step10: 练习:降维
Step11: 观察
Step12: 可视化一个双标图(Biplot)
Step13: 观察
Step14: 问题 7
Step15: 练习
Step16: 问题 8
Step17: 回答
Step18: 回答
Step19: 回答:
|
<ASSISTANT_TASK:>
Python Code:
%%time
# 引入这个项目需要的库
import numpy as np
import pandas as pd
import visuals as vs
from IPython.display import display # 使得我们可以对DataFrame使用display()函数
# 设置以内联的形式显示matplotlib绘制的图片(在notebook中显示更美观)
%matplotlib inline
# 载入整个客户数据集
try:
data = pd.read_csv("customers.csv")
data.drop(['Region', 'Channel'], axis = 1, inplace = True)
print "Wholesale customers dataset has {} samples with {} features each.".format(*data.shape)
except:
print "Dataset could not be loaded. Is the dataset missing?"
# 显示数据集的一个描述
display(data.describe())
# TODO:从数据集中选择三个你希望抽样的数据点的索引
indices = [90, 200, 265]
# 为选择的样本建立一个DataFrame
samples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True)
print "Chosen samples of wholesale customers dataset:"
display(samples)
# Code review modify for report_20170808.html
# 感觉图的效果并不明显,问题8中使用图表效果好【newdata.rank(呈现表格),sns.heatmap(呈现带颜色的表格)】
# 这里加上,以供未来参考,感谢评审者的建议
((samples.median()) / data.median()).plot.bar(figsize=(10,4), title='Samples compared to MEDIAN')
((samples.mean()) / data.mean()).plot.bar(figsize=(10,4), title='Samples compared to MEAN');
%%time
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
# TODO:为DataFrame创建一个副本,用'drop'函数丢弃一些指定的特征
grocery = data['Grocery']
data_without_grocery = data.drop(['Grocery'],1)
# display(data.head(1))
# display(fresh.head())
# display(new_data.head())
# TODO:使用给定的特征作为目标,将数据分割成训练集和测试集
X_train, X_test, y_train, y_test = train_test_split(data_without_grocery, grocery, test_size=0.25, random_state=20)
# display(X_train.head(1))
# display(X_test.head(1))
# display(y_train.head(1))
# display(y_test.head(1))
# # TODO:创建一个DecisionTreeRegressor(决策树回归器)并在训练集上训练它
regressor = DecisionTreeRegressor(max_depth=15, random_state=20)
regressor.fit(X_train, y_train)
# # TODO:输出在测试集上的预测得分
score = regressor.score(X_test, y_test)
print score
# 对于数据中的每一对特征构造一个散布矩阵
pd.plotting.scatter_matrix(data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
%pdb
# 数据关联度表格呈现
import seaborn
seaborn.heatmap(data.corr(), annot=True)
# 添加这些代码以后,导致所有曲线图背景颜色显示异常
%%time
from scipy import stats
display(data.head())
# TODO:使用自然对数缩放数据
# log_data = data.apply(lambda x: np.log(x))
log_data = np.log(data)
display(log_data.head())
# TODO:使用自然对数缩放样本数据
log_samples = np.log(samples)
display(log_samples.head())
# 为每一对新产生的特征制作一个散射矩阵
pd.plotting.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
# 展示经过对数变换后的样本数据
display(log_samples)
import numpy as np
# 用于记录所有的异常值的索引
allOutliers = []
# 对于每一个特征,找到值异常高或者是异常低的数据点
for feature in log_data.keys():
# TODO:计算给定特征的Q1(数据的25th分位点)
Q1 = np.percentile(log_data[feature], 25)
# TODO:计算给定特征的Q3(数据的75th分位点)
Q3 = np.percentile(log_data[feature], 75)
# TODO:使用四分位范围计算异常阶(1.5倍的四分位距)
step = 1.5*(Q3-Q1)
# 显示异常点
print "Data points considered outliers for the feature '{}':".format(feature)
display(log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))])
outlier = log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))]
allOutliers.extend(outlier.index)
# 找出重复的索引值
sortedAllOutliers = np.sort(allOutliers)
duplicatedOutliers = []
preElement = -1
for element in sortedAllOutliers.flat:
if element == preElement and element not in duplicatedOutliers:
duplicatedOutliers.append(element);
preElement = element
print "sortedAllOutliers:{0}".format(sortedAllOutliers)
print "duplicatedOutliers: {0}".format(duplicatedOutliers)
# 可选:选择你希望移除的数据点的索引
outliers = np.unique(sortedAllOutliers)
print "outliers: {}".format(outliers)
# 如果选择了的话,移除异常点
good_data = log_data.drop(log_data.index[outliers]).reset_index(drop = True)
%%time
from sklearn.decomposition import PCA
# TODO:通过在good data上使用PCA,将其转换成和当前特征数一样多的维度
pca = PCA(n_components=good_data.shape[1], random_state=20)
pca.fit(good_data)
# TODO:使用上面的PCA拟合将变换施加在log_samples上
pca_samples = pca.transform(log_samples)
# 生成PCA的结果图
pca_results = vs.pca_results(good_data, pca)
# 展示经过PCA转换的sample log-data
display(samples)
display(pd.DataFrame(np.round(pca_samples, 4), columns = pca_results.index.values))
%%time
from sklearn.decomposition import PCA
# TODO:通过在good data上进行PCA,将其转换成两个维度
pca = PCA(n_components = 2, random_state=20)
pca.fit(good_data)
# TODO:使用上面训练的PCA将good data进行转换
reduced_data = pca.transform(good_data)
# TODO:使用上面训练的PCA将log_samples进行转换
pca_samples = pca.transform(log_samples)
# 为降维后的数据创建一个DataFrame
reduced_data = pd.DataFrame(reduced_data, columns = ['Dimension 1', 'Dimension 2'])
# 展示经过两个维度的PCA转换之后的样本log-data
display(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2']))
# Create a biplot
vs.biplot(good_data, reduced_data, pca)
%%time
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score
# TODO:在降维后的数据上使用你选择的聚类算法
clusterer = GaussianMixture(n_components=2, random_state=20).fit(reduced_data)
# TODO:预测每一个点的簇
preds = clusterer.predict(reduced_data)
# TODO:找到聚类中心
centers = clusterer.means_
print "centers: \n{0}".format(centers)
# TODO:预测在每一个转换后的样本点的类
sample_preds = clusterer.predict(pca_samples)
# TODO:计算选择的类别的平均轮廓系数(mean silhouette coefficient)
score = silhouette_score(reduced_data, preds)
print "silhouette_score: {0}".format(score)
print pca_samples
# 从已有的实现中展示聚类的结果
vs.cluster_results(reduced_data, preds, centers, pca_samples)
log_centers = pca.inverse_transform(centers)
print log_centers
# TODO:反向转换中心点
log_centers = pca.inverse_transform(centers)
# TODO:对中心点做指数转换
true_centers = np.exp(log_centers)
# 显示真实的中心点
segments = ['Segment {}'.format(i) for i in range(0,len(centers))]
true_centers = pd.DataFrame(np.round(true_centers), columns = data.keys())
true_centers.index = segments
display(true_centers)
import seaborn as sns
import matplotlib.pyplot as plt
# add the true centers as rows to our original data
newdata = data.append(true_centers)
# show the percentiles of the centers
ctr_pcts = 100. * newdata.rank(axis=0, pct=True).loc[['Segment 0', 'Segment 1']].round(decimals=3)
print ctr_pcts
# visualize percentiles with heatmap
sns.heatmap(ctr_pcts, annot=True, cmap='Greens', fmt='.1f', linewidth=.1, square=True, cbar=False)
plt.xticks(rotation=45, ha='center')
plt.yticks(rotation=0)
plt.title('Percentile ranks of\nsegment centers');
# 显示预测结果
display(samples.head())
for i, pred in enumerate(sample_preds):
print "Sample point", i, "predicted to be in Cluster", pred
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# 读取包含聚类结果的数据
cluster_data = pd.read_csv("cluster.csv")
y = cluster_data['Region']
X = cluster_data.drop(['Region'], axis = 1)
# 划分训练集测试集
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, random_state=24)
clf = RandomForestClassifier(random_state=24)
clf.fit(X_train, y_train)
print "使用cluster特征的得分", clf.score(X_test, y_test)
# 移除cluster特征
X_train = X_train.copy()
X_train.drop(['cluster'], axis=1, inplace=True)
X_test = X_test.copy()
X_test.drop(['cluster'], axis=1, inplace=True)
clf.fit(X_train, y_train)
print "不使用cluster特征的得分", clf.score(X_test, y_test)
# 根据‘Channel‘数据显示聚类的结果
vs.channel_results(reduced_data, outliers, pca_samples)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: OK, so that's pretty neat, the FFT paper's R code runs in quadratic time, neatly quadrupling in runtime for every doubling of input. The numpy versions are a bit noisier in their scaling but uniformly faster.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.random.seed(1234)
n = 1000
p = np.random.rand(n)
p.sort()
def shift(x, y):
    """Fill *y* with *x* shifted right by one slot (in place).

    After the call y == [0, x[0], x[1], ..., x[-2]]; x is left untouched.
    """
    y[:] = np.roll(x, 1)  # circular shift right; wrapped element is then zeroed
    y[0] = 0
def cl(p, k):
    """Poisson-binomial pmf over counts 0..k-1 via the recursive convolution.

    Each success probability is folded in one at a time: the running pmf
    becomes a mixture of "no new success" (weight 1 - pp) and "one more
    success" (weight pp, i.e. the pmf shifted right by one position).
    """
    probs = np.sort(p.copy())
    pmf = np.zeros(k)
    pmf[0] = 1.0
    shifted = np.zeros(k)
    shift(pmf, shifted)
    for pp in probs:
        pmf = (1 - pp) * pmf + pp * shifted
        shift(pmf, shifted)
    return pmf
pmf = cl(p, 1001)
samples = 10 * 1000
samples = np.sum(np.random.rand(samples, len(p)) < p, axis=1)
%matplotlib inline
from matplotlib import pyplot as plt
plt.hist(samples, density=True, label='empirical', bins=30)
plt.plot(pmf, label='CL')
plt.legend(bbox_to_anchor=(1.01, 0.5), loc='center left')
plt.xlim(400, 600)
plt.xlabel("k")
plt.ylabel(r"$\xi_k$")
plt.title(r"Poisson Binomial $n=1000$, $p_j\sim Unif[0,1]$")
plt.show()
def dft_cf(p):
n = len(p)
phi = np.zeros(n + 1) + 0j
phi[0] = 1
for k in range(1, 1 + n // 2):
phi[k] = np.prod((1 - p + p * np.exp(2j * np.pi * k / (n + 1))))
phi[n // 2 + 1:] = np.conj(phi[n // 2:n % 2:-1])
return np.fft.fft(phi / (n + 1)).real
np.allclose(dft_cf(p), cl(p, n + 1))
%%script Rscript --vanilla /dev/stdin
# poibin is the R package by the DFT-CF author
library(poibin)
set.seed(1234)
prev <- 1
for (i in 10:14) {
n <- 2 ^ i
tot <- 0
for (j in 1:5) {
p <- runif(n)
ptm <- proc.time()
invisible(dpoibin(kk=0:n, pp=p))
ptm <- proc.time() - ptm
ptm <- ptm["elapsed"]
tot <- tot + ptm
}
ratio <- tot / prev
prev <- tot
write(sprintf("2^%2d %7.3f (%5.1fx prev)", i, prev / 5, ratio), file=stdout())
}
from time import time
def bench(f):
np.random.seed(1234)
prev = 1
for i in range(10, 15):
n = 2 ** i
tot = 0
for _ in range(5):
p = np.random.rand(n)
t = time()
f(p)
tot += time() - t
ratio = tot / prev
prev = tot
print("2^{:2d} {:7.3f} ({:5.1f}x prev)".format(i, prev / 5, ratio))
print('CL')
bench(lambda p: cl(p, len(p) + 1))
print('DFT-CF')
bench(dft_cf)
from scipy.integrate import fixed_quad, quad
import numpy as np
from numba import jit
def integrand(p):
    """Build the inverse-Fourier integrand for the Poisson-binomial pmf.

    Returns f(x, k) = Re( G_X(e^{ix}) * e^{-ikx} ) / (2*pi), where G_X is
    the probability generating function of the Poisson-binomial
    distribution with success probabilities *p*.  Integrating f over
    [-pi, pi] yields P(X = k).  The inner kernel is numba-compiled for
    speed; the outer closure freezes *p* so quadrature only sees (x, k).
    """
    # Explicit signature lets numba compile eagerly in nopython mode.
    @jit("float64(float64, float64[:], float64)", nopython=True)
    def compiled_f(x, p, k):
        return np.real((
            # G_X(e^(ix))
            np.prod(1 - p * (1 - np.exp(1j * x)))
            # e^(-kix)
            * np.exp(1j * x * -k)
        ) / 2 / np.pi)
    def f(x, k):
        # Thin wrapper binding the outer p (numba kernels can't close over it).
        return compiled_f(x, p, k)
    return f
def query(p):
    """Return a function k -> P(X = k) for the Poisson-binomial with probs *p*.

    Each call integrates the characteristic-function integrand over one
    period [-pi, pi] with adaptive quadrature, so individual pmf entries
    can be queried without computing the whole distribution.
    """
    kernel = integrand(p)
    def point_mass(k):
        value, _abserr = quad(lambda x: kernel(x, k), -np.pi, np.pi)
        return value
    return point_mass
np.random.seed(1234)
n = 10000
p = np.random.rand(n)
p.sort()
queries = n // 2 - 1, n // 2, n // 2 + 1
t = time()
pmf = cl(p, len(p) + 1)
pmf_vals = [pmf[k] for k in queries]
cl_t = time() - t
q = query(p)
t = time()
q_vals = [q(k) for k in queries]
q_t = time() - t
print('cl time', cl_t)
print('q time', q_t)
with np.printoptions(precision=5):
print('cl vals', np.array(pmf_vals))
print('q vals', np.array(q_vals))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Synthetic image generation function
Step2: Batch image generation function
Step3: Let's start with 1000 training samples and 100 test samples
Step4: We reshaped the images to be squared just because it's easier. Here's how they look
Step5: CNN
Step6: Train
Step7: Previous run took ~10mins
Step8: Not bad at all!
Step9: Trying to lineup things
|
<ASSISTANT_TASK:>
Python Code:
import pylab as plt
# %matplotlib inline
import numpy as np
def gen_im(n=1):
    """Render one synthetic 150x150 line plot and return it as raw pixels.

    Parameters
    ----------
    n : int, optional
        Unused; kept for interface compatibility with existing callers.

    Returns
    -------
    tuple
        (data, rn, size): *data* is the flat uint8 RGB buffer of the
        rendered figure, *rn* is the 20-point random series that was
        plotted, *size* is (height, width) in pixels.
    """
    rn = np.random.uniform(low=0, high=1, size=(20,))
    # Make a random plot...
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(rn, "r")
    ax.grid()
    # Force the canvas to exactly 150x150 pixels regardless of screen DPI.
    DPI = fig.get_dpi()
    fig.set_size_inches(150 / float(DPI), 150 / float(DPI))
    fig.canvas.draw()
    # Grab the rendered RGB bytes.  np.frombuffer replaces np.fromstring,
    # which is deprecated for binary input; .copy() keeps the result
    # writable, matching the old fromstring semantics.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).copy()
    plt.close('all')  # free the figure so repeated calls don't leak memory
    return data, rn, fig.canvas.get_width_height()[::-1]
_,_,size = gen_im(n=1)
def batch_gen(n=1):
    """Generate a batch of *n* synthetic plot images with their targets.

    Returns
    -------
    tuple
        (arr, vals): *arr* has shape (n, height, width, 3) with uint8
        pixels, *vals* has shape (n, 20) with the random series each
        image plots.
    """
    images, targets = [], []
    for _ in range(n):
        data, rn, size = gen_im()
        images.append(data)
        targets.append(rn)
    # Stacking once at the end is O(n) instead of the O(n^2) repeated
    # vstack, and it also keeps a 2-D shape for n == 1 (the original
    # reshape crashed in that case because arr stayed 1-D).
    arr = np.vstack(images).reshape(n, *size, 3)
    vals = np.vstack(targets)
    return arr, vals
x_train, y_train = batch_gen(n=1000)
x_test, y_test = batch_gen(n=100)
plt.imshow(x_train[0,:].reshape(*size,3))
plt.show()
import keras
from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D
from keras.models import Sequential
from keras.layers.convolutional import ZeroPadding2D
from keras.layers import Activation
from keras.optimizers import SGD
import matplotlib.pylab as plt
batch_size = 1
num_classes = 20
epochs = 10
# input image dimensions
img_x, img_y = size
input_shape = (img_x, img_y, 3)
print('x_train shape:', x_train.shape)
filter_size = 3
pool_size = 2
# CNN regressor: maps a rendered 150x150 RGB line-plot image to the 20
# y-values that generated it.  The output activation is linear because
# this is a regression task, despite the `num_classes` naming.
model = Sequential([
    Conv2D(32, (6, 6), input_shape=input_shape, data_format="channels_last", activation='relu'),  # coarse line/edge detectors
    MaxPooling2D(pool_size=(pool_size, pool_size)),
    Conv2D(64, (filter_size, filter_size), data_format="channels_last", activation='relu'),
    MaxPooling2D(pool_size=(pool_size, pool_size)),
    Conv2D(128, (filter_size, filter_size), data_format="channels_last", activation='relu'),
    # # MaxPooling2D(pool_size=(pool_size, pool_size)),
    Conv2D(128, (filter_size, filter_size), data_format="channels_last", activation='relu'),
    # # MaxPooling2D(pool_size=(pool_size, pool_size)),
    Flatten(),
    # Dropout(0.4),
    # Dense(1000, activation='relu'),
    Dropout(0.4),  # regularization before the output layer
    Dense(num_classes, activation='linear'),  # one linear unit per target y-value
])
model.compile('adadelta', 'mse')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test),
callbacks=[history])
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score)
model.predict(x_test[0].reshape(1,*size, 3))
plt.plot(model.predict(x_test)[0])
plt.plot(y_test[0])
plt.show()
from PIL import Image
im = Image.open("log_crop_smaller.png")
im
im = im.resize((150,150)).rotate(90)
im
im_arr = np.array(im.getdata())[:,:-1].reshape(1, im.size[1], im.size[0], 3)
plt.plot(model.predict(im_arr)[0])
plt.show()
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)
ax0.imshow(im, extent=(-2, 19, 0, 1), aspect=4)
ax0.set_title('Real Log')
ax0.set_xlim(-3,20)
ax1.plot(model.predict(im_arr)[0],'r')
ax1.set_title('Predicted Log')
ax1.set_ylim(0,1)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: We create average power time courses for each frequency band
Step4: Now we can compute the Global Field Power
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Denis A. Engemann <denis.engemann@gmail.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import somato
from mne.baseline import rescale
from mne.stats import bootstrap_confidence_interval
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# let's explore some frequency bands
iter_freqs = [
('Theta', 4, 7),
('Alpha', 8, 12),
('Beta', 13, 25),
('Gamma', 30, 45)
]
# set epoching parameters
event_id, tmin, tmax = 1, -1., 3.
baseline = None
# get the header to extract events
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
frequency_map = list()
for band, fmin, fmax in iter_freqs:
# (re)load the data to save memory
raw = mne.io.read_raw_fif(raw_fname)
raw.pick_types(meg='grad', eog=True) # we just look at gradiometers
raw.load_data()
# bandpass filter
raw.filter(fmin, fmax, n_jobs=1, # use more jobs to speed up.
l_trans_bandwidth=1, # make sure filter params are the same
h_trans_bandwidth=1) # in each band and skip "auto" option.
# epoch
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=baseline,
reject=dict(grad=4000e-13, eog=350e-6),
preload=True)
# remove evoked response
epochs.subtract_evoked()
# get analytic signal (envelope)
epochs.apply_hilbert(envelope=True)
frequency_map.append(((band, fmin, fmax), epochs.average()))
del epochs
del raw
# Helper function for plotting spread
def stat_fun(x):
    """Return the sum of squares along the first axis of *x*.

    Used as the bootstrap statistic for the confidence interval of the
    global field power.  (The docstring lost its triple quotes during
    extraction, leaving a bare statement that was a SyntaxError;
    restored here.)
    """
    return np.sum(x ** 2, axis=0)
# Plot
fig, axes = plt.subplots(4, 1, figsize=(10, 7), sharex=True, sharey=True)
colors = plt.get_cmap('winter_r')(np.linspace(0, 1, 4))
for ((freq_name, fmin, fmax), average), color, ax in zip(
frequency_map, colors, axes.ravel()[::-1]):
times = average.times * 1e3
gfp = np.sum(average.data ** 2, axis=0)
gfp = mne.baseline.rescale(gfp, times, baseline=(None, 0))
ax.plot(times, gfp, label=freq_name, color=color, linewidth=2.5)
ax.axhline(0, linestyle='--', color='grey', linewidth=2)
ci_low, ci_up = bootstrap_confidence_interval(average.data, random_state=0,
stat_fun=stat_fun)
ci_low = rescale(ci_low, average.times, baseline=(None, 0))
ci_up = rescale(ci_up, average.times, baseline=(None, 0))
ax.fill_between(times, gfp + ci_up, gfp - ci_low, color=color, alpha=0.3)
ax.grid(True)
ax.set_ylabel('GFP')
ax.annotate('%s (%d-%dHz)' % (freq_name, fmin, fmax),
xy=(0.95, 0.8),
horizontalalignment='right',
xycoords='axes fraction')
ax.set_xlim(-1000, 3000)
axes.ravel()[-1].set_xlabel('Time [ms]')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 0 - hyperparams
Step2: Step 1 - collect data (and/or generate them)
Step3: Step 2 - Build model
Step4: Step 3 training the network
Step5: TODO Co integration
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import tensorflow as tf
from os import path
import numpy as np
import pandas as pd
import csv
from sklearn.model_selection import StratifiedShuffleSplit
from time import time
from matplotlib import pyplot as plt
import seaborn as sns
from mylibs.jupyter_notebook_helper import show_graph
from tensorflow.contrib import rnn
from tensorflow.contrib import learn
import shutil
from tensorflow.contrib.learn.python.learn import learn_runner
from IPython.display import Image
from IPython.core.display import HTML
from mylibs.tf_helper import getDefaultGPUconfig
from data_providers.binary_shifter_varlen_data_provider import \
BinaryShifterVarLenDataProvider
from data_providers.price_history_varlen_data_provider import PriceHistoryVarLenDataProvider
from models.model_05_price_history_rnn_varlen import PriceHistoryRnnVarlen
from sklearn.metrics import r2_score
from mylibs.py_helper import factors
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
from statsmodels.tsa.stattools import coint
dtype = tf.float32
seed = 16011984
random_state = np.random.RandomState(seed=seed)
config = getDefaultGPUconfig()
%matplotlib inline
from common import get_or_run_nn
num_epochs = 10
series_max_len = 60
num_features = 1 #just one here, the function we are predicting is one-dimensional
state_size = 400
target_len = 30
batch_size = 47
csv_in = '../price_history_03a_fixed_width.csv'
npz_path = '../price_history_03_dp_60to30_from_fixed_len.npz'
# XX, YY, sequence_lens, seq_mask = PriceHistoryVarLenDataProvider.createAndSaveDataset(
# csv_in=csv_in,
# npz_out=npz_path,
# input_seq_len=60, target_seq_len=30)
# XX.shape, YY.shape, sequence_lens.shape, seq_mask.shape
dp = PriceHistoryVarLenDataProvider(filteringSeqLens = lambda xx : xx >= target_len,
npz_path=npz_path)
dp.inputs.shape, dp.targets.shape, dp.sequence_lengths.shape, dp.sequence_masks.shape
model = PriceHistoryRnnVarlen(rng=random_state, dtype=dtype, config=config)
graph = model.getGraph(batch_size=batch_size, state_size=state_size,
target_len=target_len, series_max_len=series_max_len)
show_graph(graph)
num_epochs, state_size, batch_size
def experiment():
    """Train the RNN with the notebook's hyperparameters and return its
    dynamic statistics plus the per-sequence predictions dict."""
    run_kwargs = dict(epochs=num_epochs,
                      state_size=state_size,
                      series_max_len=series_max_len,
                      target_len=target_len,
                      npz_path=npz_path,
                      batch_size=batch_size)
    return model.run(**run_kwargs)
from os.path import isdir
data_folder = '../../../../Dropbox/data'
assert isdir(data_folder)
dyn_stats, preds_dict = get_or_run_nn(experiment,
filename='001_plain_rnn_60to30', nn_runs_folder= data_folder + '/nn_runs')
dyn_stats.plotStats()
plt.show()
r2_scores = [r2_score(y_true=dp.targets[ind], y_pred=preds_dict[ind])
for ind in range(len(dp.targets))]
ind = np.argmin(r2_scores)
ind
sns.tsplot(data=dp.inputs[ind].flatten())
reals = dp.targets[ind]
preds = preds_dict[ind]
r2_score(y_true=reals, y_pred=preds)
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
from cost_functions.huber_loss import huber_loss
average_huber_loss = np.mean([np.mean(huber_loss(dp.targets[ind], preds_dict[ind]))
for ind in range(len(dp.targets))])
average_huber_loss
%%time
dtw_scores = [fastdtw(dp.targets[ind], preds_dict[ind])[0]
for ind in range(len(dp.targets))]
np.mean(dtw_scores)
coint(preds, reals)
num_epochs, state_size, batch_size
cost_func = PriceHistoryRnnVarlen.COST_FUNCS.MSE
def experiment():
    """Train the variable-length RNN with the MSE cost function.

    Identical to the earlier experiment() except that `cost_func`
    (set to PriceHistoryRnnVarlen.COST_FUNCS.MSE above) is forwarded.

    Returns:
        (dynamic stats, per-instance predictions dict) from model.run.
    """
    stats, preds = model.run(
        epochs=num_epochs,
        cost_func=cost_func,
        state_size=state_size,
        series_max_len=series_max_len,
        target_len=target_len,
        npz_path=npz_path,
        batch_size=batch_size,
    )
    return stats, preds
dyn_stats, preds_dict = get_or_run_nn(experiment,
filename='001_plain_rnn_60to30_mse')
dyn_stats.plotStats()
plt.show()
r2_scores = [r2_score(y_true=dp.targets[ind], y_pred=preds_dict[ind])
for ind in range(len(dp.targets))]
ind = np.argmin(r2_scores)
ind
reals = dp.targets[ind]
preds = preds_dict[ind]
r2_score(y_true=reals, y_pred=preds)
sns.tsplot(data=dp.inputs[ind].flatten())
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
%%time
dtw_scores = [fastdtw(dp.targets[ind], preds_dict[ind])[0]
for ind in range(len(dp.targets))]
np.mean(dtw_scores)
coint(preds, reals)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 2
|
<ASSISTANT_TASK:>
Python Code:
# let's load MNIST data as we did in the exercise on MNIST with FC Nets
# %load ../solutions/sol_52.py
## try yourself
## `evaluate` the model on test data
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id=lucky></a>
Step2: Question. What do we have here? A list of length 10? Whose elements are dataframes? Evidently this reads in all the tables from the page into dataframes and collects them in a list.
Step3: Scanning url's
Step4: Yahoo Finance
Step5: SODA API
Step6: Notice that we can choose to interact with either the json or the csv file. They both give us lots of columns (and not very many rows) though and we might want to choose a smaller set of the data. We will show that we can do this using the api described in the second link
Step7: Accessing web pages
Step8: Extracting pieces of web pages
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd # data package
import matplotlib.pyplot as plt # graphics
import sys # system module, used to get Python version
import os # operating system tools (check files)
import datetime as dt # date tools, used to note current date
# these are new
import requests, io # internet and input tools
from bs4 import BeautifulSoup # website parsing
%matplotlib inline
print('\nPython version: ', sys.version)
print('Pandas version: ', pd.__version__)
print('Requests version: ', requests.__version__)
print("Today's date:", dt.date.today())
pd.read_html?
# baseball reference
url = 'http://www.baseball-reference.com/players/m/mccutan01.shtml'
am = pd.read_html(url)
print('Ouput has type', type(am), 'and length', len(am))
print('First element has type', type(am[0]))
am[4].head()
# let's see if we're lucky
dfs = pd.read_html("http://finance.yahoo.com/q/hp?s=AAPL+Historical+Prices")
len(dfs)
dfs[9].head(10)
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
symbol='AAPL' ## change this to any stock symbol you'd like to get
begin_day='01'
begin_month='00' # January (note the months run from 0 to 11)
begin_year='2010'
end_day = '31'
end_month = '11' #December
end_year='2014'
freq='d' #d - daily, w - weekly, m - monthly etc..
# the following three lines will construct a url based on the parameters above:
url = 'http://real-chart.finance.yahoo.com/table.csv?s='+symbol
url+= '&a='+begin_month+'&b='+begin_day+'&c='+begin_year
url+= '&d='+end_month+'&e='+end_day+'&f='+end_year+'&g='+freq+'&ignore=.csv'
print (url) ## This should show a simliar stucture to the csv file found before.
# pandas allows us to read the csv file dirctly from the url
df=pd.read_csv(url)
# since all the data is read as a string, it'll be good to convert the date column to a datetime type
df['Date']=pd.to_datetime(df['Date'])
# now we can, for exmaple, plot the Adj.Close column vs. the date:
df.plot(x='Date',y='Adj Close')
base_url_json = "https://www.opendata.go.ke/resource/p452-xb7c.json"
df_json = pd.read_json(base_url_json)
df_json.head(3)
base_url_csv = "https://www.opendata.go.ke/resource/p452-xb7c.csv"
df_csv = pd.read_csv(base_url_csv)
df_csv.columns = df_csv.columns.str.replace(" ", "_").str.lower()
df_csv.head(3)
# First lets select subset of data
base_url_json += "?$select=boys_toilets,girls_toilets,geolocation,name_of_school,level_of_education"
df_json2 = pd.read_json(base_url_json)
df_json2.head()
# Now let's raise the number of observations
base_url_json += "&$limit=5000"
df_json3 = pd.read_json(base_url_json)
df_json3.head(5)
df_json.shape
df_json2.shape
df_json3.shape
baseurl = 'https://www.opendata.go.ke/resource/p452-xb7c.json'
baseurl += '?$select=boys_toilets,girls_toilets,geolocation,name_of_school,level_of_education'
baseurl += '&$limit=50000'
df_big = pd.read_json(baseurl)
df_big.shape
url = 'http://databootcamp.nyuecon.com/'
db = requests.get(url)
db.headers
db.url
db.status_code
db.content[0:500]
bs = BeautifulSoup(db.content, 'lxml')
print('Type and length: ', type(bs), ', ', len(bs), sep='')
print('Title: ', bs.title)
print('First n characters:\n', bs.prettify()[0:250], sep='')
bs.head
bs.title
bs.find_all('a')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Original Article
Step2: First Look
Step3: Under the hood, pandas groups the columns into block of values of the same type
Step4: Subtype
Step5: Optimize Numeric Columns with subtypes
Step6: We can see a drop from 7.9 MB to 1.5 MB. Now we have 5 uint8 and 1 uint32 instead of 6 int64.
Step7: All our float columns were converted from float64 to float32, giving us a 50% reduction in memory usage.
Step8: In order to have the best benefit, we have to optimize the object types.
Step9: You can see that the size of strings when stored in a pandas series are identical to their usage as separate strings in Python.
Step10: A quick glance reveals many columns where there are few unique values relative to the overall ~172,000 games in our data set.
Step11: When we convert columns to category, it's important to be aware of the trade-offs
Step12: Convert date
Step13: We’ll convert using pandas.to_datetime() function, using the format parameter to tell it that our date data is stored YYYY-MM-DD.
Step14: Selecting Types While Reading the Data In
Step15: Now we can use the dictionary, along with a few parameters for the date to read in the data with the correct types in a few lines
Step16: Analyzing baseball games
|
<ASSISTANT_TASK:>
Python Code:
import os
import pandas as pd
# Load Data
gl = pd.read_csv('..\data\game_logs.csv')
# Available also at https://data.world/dataquest/mlb-game-logs
# Data Preview
gl.head()
gl.dtypes.head()
# Select only the column with same type
gl.select_dtypes(include=['object']).head()
#Exact amount of memory usage of df
gl.info(memory_usage='deep')
gl.describe()
# Reference http://www.markhneedham.com/blog/2017/07/05/pandas-find-rows-where-columnfield-is-null/
# Columns with null values
null_columns=gl.columns[gl.isnull().any()]
gl[null_columns].isnull().sum()
# Every row that contains at least one null value
print(gl[gl.isnull().any(axis=1)][null_columns].head())
gl.dtypes.value_counts()
for dtype in gl.dtypes.unique(): #['float','int64','object']:
selected_dtype = gl.select_dtypes(include=[dtype])
mean_usage_b = selected_dtype.memory_usage(deep=True).mean()
mean_usage_mb = mean_usage_b / 1024 ** 2
print("Average memory usage for {} columns: {:03.2f} MB".format(dtype,mean_usage_mb))
import numpy as np
int_types = ["uint8", "int8", "int16"]
for it in int_types:
print(np.iinfo(it))
# We're going to be calculating memory usage a lot,
# so we'll create a function to save us some time!
def mem_usage(pandas_obj):
    """Return the deep memory footprint of a DataFrame or Series as 'X.XX MB'.

    For a DataFrame the per-column usages are summed; anything that is not
    a DataFrame is assumed to be a Series (whose memory_usage is a scalar).
    """
    if isinstance(pandas_obj, pd.DataFrame):
        total_bytes = pandas_obj.memory_usage(deep=True).sum()
    else:
        total_bytes = pandas_obj.memory_usage(deep=True)
    # Convert bytes to megabytes for display.
    return "{:03.2f} MB".format(total_bytes / 1024 ** 2)
mem_usage(gl)
gl_int = gl.select_dtypes(include=['int64'])
converted_int = gl_int.apply(pd.to_numeric,downcast='unsigned')
print(mem_usage(gl_int))
print(mem_usage(converted_int))
compare_ints = pd.concat([gl_int.dtypes,converted_int.dtypes],axis=1)
compare_ints.columns = ['before','after']
compare_ints.apply(pd.Series.value_counts)
gl_float = gl.select_dtypes(include=['float'])
converted_float = gl_float.apply(pd.to_numeric,downcast='float')
print(mem_usage(gl_float))
print(mem_usage(converted_float))
compare_floats = pd.concat([gl_float.dtypes,converted_float.dtypes],axis=1)
compare_floats.columns = ['before','after']
compare_floats.apply(pd.Series.value_counts)
optimized_gl = gl.copy()
optimized_gl[converted_int.columns] = converted_int
optimized_gl[converted_float.columns] = converted_float
print(mem_usage(gl))
print(mem_usage(optimized_gl))
from sys import getsizeof
s1 = 'working out'
s2 = 'memory usage for'
s3 = 'strings in python is fun!'
s4 = 'strings in python is fun!'
for s in [s1, s2, s3, s4]:
print(getsizeof(s))
obj_series = pd.Series(['working out',
'memory usage for',
'strings in python is fun!',
'strings in python is fun!'])
obj_series.apply(getsizeof)
#Where we migh be able to reduce memory?
gl_obj = gl.select_dtypes(include=['object']).copy()
gl_obj.describe()
dow = gl_obj.day_of_week
print(dow.head())
dow_cat = dow.astype('category')
print(dow_cat.head())
# We can see the integer values associated to column
dow_cat.head().cat.codes
# We compare the memory usage
print(mem_usage(dow))
print(mem_usage(dow_cat))
converted_obj = pd.DataFrame()
for col in gl_obj.columns:
num_unique_values = len(gl_obj[col].unique())
num_total_values = len(gl_obj[col])
if num_unique_values / num_total_values < 0.5:
converted_obj.loc[:,col] = gl_obj[col].astype('category')
else:
converted_obj.loc[:,col] = gl_obj[col]
print(mem_usage(gl_obj))
print(mem_usage(converted_obj))
compare_obj = pd.concat([gl_obj.dtypes,converted_obj.dtypes],axis=1)
compare_obj.columns = ['before','after']
compare_obj.apply(pd.Series.value_counts)
# Now we combine with the rest of our dataframe (numeric columns)
optimized_gl[converted_obj.columns] = converted_obj
mem_usage(optimized_gl)
date = optimized_gl.date
print(mem_usage(date))
date.head()
optimized_gl['date'] = pd.to_datetime(date,format='%Y%m%d')
print(mem_usage(optimized_gl))
optimized_gl.date.head()
dtypes = optimized_gl.drop('date',axis=1).dtypes
dtypes.head()
dtypes_col = dtypes.index
dtypes_col
dtypes_type=[i.name for i in dtypes.values]
column_types = dict(zip(dtypes_col, dtypes_type))
#Preview of first 10
{k:v for k,v in list(column_types.items())[:10]}
read_and_optimized = pd.read_csv('..\data\game_logs.csv',dtype=column_types,parse_dates=['date'],infer_datetime_format=True)
print(mem_usage(read_and_optimized))
read_and_optimized.head()
import matplotlib.pyplot as plt
optimized_gl['year'] = optimized_gl.date.dt.year
game_lengths = optimized_gl.pivot_table(index='year', values='length_minutes')
game_lengths.reset_index().plot.scatter('year','length_minutes')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: Explore variable in interval, ordinal and nominal variables
Step3: Explore relation pairs in interval, ordinal and nominal variables
Step4: Compare repeated measures variables with interval, ordinal and nominal variables
Step5: Compare groups in interval, ordinal and nominal dependent variables, with one or two grouping variables with 2 or 3 group levels
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
import warnings
warnings.filterwarnings('ignore')
from cogstat import cogstat as cs
print(cs.__version__)
cs_dir, dummy_filename = os.path.split(cs.__file__) # We use this for the demo data
# Load some data
data = cs.CogStatData(data=os.path.join(cs_dir, 'sample_data', 'example_data.csv'))
# Display the data
cs.display(data.print_data())
### Explore variable ###
# Get the most important statistics of a single variable
cs.display(data.explore_variable('X'))
cs.display(data.explore_variable('Z'))
cs.display(data.explore_variable('CONDITION'))
### Explore variable pair ###
# Get the statistics of a variable pair
cs.display(data.explore_variable_pair('X', 'Y'))
cs.display(data.explore_variable_pair('Z', 'ZZ'))
cs.display(data.explore_variable_pair('TIME', 'CONDITION'))
### Behavioral data diffusion analyses ###
# cs.display(data.diffusion(error_name=['error'], RT_name=['RT'], participant_name=['participant_id'], condition_names=['loudness', 'side']))
### Compare variables ###
cs.display(data.compare_variables(['X', 'Y'], factors=[]))
cs.display(data.compare_variables(['Z', 'ZZ'], factors=[]))
cs.display(data.compare_variables(['CONDITION', 'CONDITION2'], factors=[]))
### Compare groups ###
cs.display(data.compare_groups('X', grouping_variables=['TIME']))
cs.display(data.compare_groups('X', grouping_variables=['TIME3']))
cs.display(data.compare_groups('Y', grouping_variables=['TIME']))
cs.display(data.compare_groups('Y', grouping_variables=['TIME3']))
cs.display(data.compare_groups('CONDITION', grouping_variables=['TIME']))
cs.display(data.compare_groups('X', grouping_variables=['TIME', 'CONDITION']))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If an ery_30-15.sorted.cov file does not exist yet, create one using bamtools coverage
Step2: Let's have a quick look at the new coverage file
Step3: The file has three columns
|
<ASSISTANT_TASK:>
Python Code:
import os
!ls
if not os.path.exists("ery_30-15.sorted.cov.gz"):
try:
# 'call' is from the subprocess module
retcode = os.system("bamtools coverage -in ery_30-15.sorted.bam | gzip > ery_30-15.sorted.cov.gz")
if retcode < 0:
print "Child was terminated by signal", -retcode
else:
print "Child returned", retcode
except OSError as e:
print "Execution failed", e
else:
print "file already exists"
!gzip -dc ery_30-15.sorted.cov.gz | head
with os.popen("gzip -dc ery_30-15.sorted.cov.gz | cut -f 3", "r") as cov:
for _ in xrange(10):
print cov.readline().rstrip()
CovCountDict = {}
with os.popen("gzip -dc ery_30-15.sorted.cov.gz | cut -f 3", "r") as cov:
for c in cov:
c = int(c.rstrip())
try:
CovCountDict[c] += 1
except KeyError:
CovCountDict[c] = 1
# sort key-value pairs by key (coverage) and show the first 100:
cov_sorted = sorted(CovCountDict.items(), key=lambda e: e[0])
cov_sorted[:100]
import pylab
%matplotlib inline
pylab.plot([i[0] for i in cov_sorted][:50], [i[1] for i in cov_sorted][:50])
pylab.title("Coverage distribution")
pylab.xlabel("coverage")
pylab.ylabel("count")
coverage = [i[0] for i in cov_sorted]
counts = [i[1] for i in cov_sorted]
pylab.hist(coverage, weights=counts, bins=50, range=(0,50), normed=True, cumulative=True, histtype="step")
pylab.title("Coverage distribution")
pylab.xlabel("coverage")
pylab.ylabel("count")
pylab.grid()
# Number of distinct coverage values observed.
# Fixed NameError: the dict built above is CovCountDict, not `count`
# (and len(d) is the idiomatic form of len(d.keys())).
len(CovCountDict)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Разовое разбиение данных на обучение и тест с помощью train_test_split
Step2: Стратегии проведения кросс-валидации
Step3: StratifiedKFold
Step4: ShuffleSplit
Step5: StratifiedShuffleSplit
Step6: Leave-One-Out
|
<ASSISTANT_TASK:>
Python Code:
from sklearn import cross_validation, datasets
import numpy as np
iris = datasets.load_iris()
train_data, test_data, train_labels, test_labels = cross_validation.train_test_split(iris.data, iris.target,
test_size = 0.3)
#убедимся, что тестовая выборка действительно составляет 0.3 от всех данных
float(len(test_labels))/len(iris.data)
print 'Размер обучающей выборки: {} объектов \nРазмер тестовой выборки: {} объектов'.format(len(train_data),
len(test_data))
print 'Обучающая выборка:\n', train_data[:5]
print '\n'
print 'Тестовая выборка:\n', test_data[:5]
print 'Метки классов на обучающей выборке:\n', train_labels
print '\n'
print 'Метки классов на тестовой выборке:\n', test_labels
for train_indices, test_indices in cross_validation.KFold(10, n_folds = 5):
print train_indices, test_indices
for train_indices, test_indices in cross_validation.KFold(10, n_folds = 2, shuffle = True):
print train_indices, test_indices
for train_indices, test_indices in cross_validation.KFold(10, n_folds = 2, shuffle = True, random_state = 1):
print train_indices, test_indices
target = np.array([0] * 5 + [1] * 5)
print target
for train_indices, test_indices in cross_validation.StratifiedKFold(target, n_folds = 2, shuffle = True, random_state = 0):
print train_indices, test_indices
target = np.array([0, 1] * 5)
print target
for train_indices, test_indices in cross_validation.StratifiedKFold(target, n_folds = 2,shuffle = True):
print train_indices, test_indices
for train_indices, test_indices in cross_validation.ShuffleSplit(10, n_iter = 10, test_size = 0.2):
print train_indices, test_indices
target = np.array([0] * 5 + [1] * 5)
print target
for train_indices, test_indices in cross_validation.StratifiedShuffleSplit(target, n_iter = 4, test_size = 0.2):
print train_indices, test_indices
for train_indices, test_index in cross_validation.LeaveOneOut(10):
print train_indices, test_index
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The company Like2Call offers hosting services for call centers. In order to dimension the staff of operators optimally, the company has collected a lot of data about the activity of the service during 5 consecutive labour days.
Step2: 1. Time between calls
Step3: The company is interested in the time between incoming calls.
Step4: 2. Parameter estimation.
Step5: [2.2]. Plot the log of the likelihood as a function of $s$ (in the appropriate range of $s$) and verify that the ML estimate reaches the maximum.
Step6: 2.2. Bayesian estimation
Step7: [2.4]. Show, in the same plot, the prior and the posterior probability density functions of parameter $S$, as a function of $s$ (in the appropriate range of $s$) and verify that the MAP estimate reaches the maximum of the posterior.
Step8: The prior distribution describes the initial belief about $S$. The figure should show that, for the given prior, the true value of $S$ can be expected to be between 0 and 5. However, the data modifies our knowledge about $S$. After observing the data, we can expect that the true value of $S$ will be somewhere between 15 and 18.
Step9: Note the MAP and the MSE estimates are very similar because the posterior distribution is approximately (although not exactly) symmetric. Also, the MSE estimate is only slightly different from the ML estimate, because we have a large dataset and the influence of the prior distribution decreases when we have much empirical evidence.
Step10: Incidentally, note that, since $\hat{s}_\text{MSE}$ is the posterior mean, the conditional MSE, which is given by,
Step11: 3. An improved data model.
Step12: 3.1. Hour-dependent model
Step13: [3.3] Compute the ML and the MSE estimates for each hour. Store them in vectors sML24 and sMSE24 and plot them as a function of the hour in the day.
Step14: [3.4] One may wonder if spliting the data in segments provides a better model for the time between calls. The joint data likelihood is a useful way to get a first quantitative evaluation of the new model.
Step15: #### 3.3. Posterior distributions
Step16: You should observe that, as expected, each posterior distribution is centered around its respective estimate $\hat{s}_\text{MSE}$, (i.e. around sMSE24[0], sMSE24[8] and sMSE24[16].
Step17: However, you can visually verify that the posterior distributions for $h=0$ and $h=8$ have less variance than that for $h=16$, why? This is because the prior distribution is in agreement with the data for $h=0$ and $h=8$ (in borth cases, $\hat{s}_\text{MSE}$ is smaller than 5). The larger variance for $h=16$ is a consequence of the higher uncertainty about $s$ created by the discrepancy between the prior and the observations.
Step18: [3.7] [OPTIONAL] Show, in the same plot, the posterior distribution of parameter $s$ given only the data at hour $h$ and for all days up to $d$, for $d=1,\ldots, 5$
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display, Math, Latex, HTML
from google.colab.output._publish import javascript
url = "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.3/latest.js?config=default"
# <SOL>
# ###############
# Data generation
# ###############
# #######################
# Generate starting times
# Generate s using the known prior
np.random.seed(0)
n_calls = 2000 # Total no. of calls in the whole period
H = 24 # Total no. of hours in a day.
n_days = 5
# Day of each call, uniformly at randomm between 0 and n_days-1
day = np.random.randint(0, high=n_days, size=n_calls)
# Hours uniformly at random
t_d_0 = np.random.uniform(0, H, size=n_calls)
# Hours from a beta distribution
t_d_1 = H * np.random.beta(9, 4, size=n_calls)
# Hours from a mixture of uniform and beta distributions
b = np.random.binomial(1, 0.1, size=n_calls)
t = day * H + b * t_d_0 + (1-b) * t_d_1
t = np.sort(t)
np.save('dataset.npy', t)
# </SOL>
t = np.load('dataset.npy')
# Histograms of call times
# <SOL>
plt.hist(t, bins=100)
plt.xlabel('Time (hours)')
plt.ylabel('No. of incoming calls')
plt.xlim([0, n_days*H])
plt.show()
# </SOL>
# <SOL>
x_all = np.diff(t)
plt.figure(figsize=(13,3))
plt.hist(x_all, bins=800)
plt.xlim([0, max(x_all)/10])
plt.xlabel('Time (hours)')
plt.show()
# </SOL>
# z = <FILL IN>
z = np.sum(x_all)
# K = <FILL IN>
K = len(x_all)
# sML = <FILL IN>
sML = K / z
javascript(url=url)
Math(r'\hat{s}_\text{ML} = ' + str(sML))
# <SOL>
s = np.linspace(1, 50, 101)
def Lp(s, z, K):
    """Log-likelihood of rate s for K exponential inter-arrival times summing to z."""
    return -z * s + K * np.log(s)
Lmax = Lp(sML, z, K)
Ls = Lp(s, z, K)
plt.plot(s, Ls)
plt.stem([sML], [Lmax])
plt.xlim((s[0], s[-1]))
plt.ylim([np.min(Ls), plt.ylim()[1]])
plt.xlabel('$s$')
plt.ylabel('Log-likelihood')
plt.show()
# </SOL>
# <FILL IN>
# sMAP = <FILL IN>
sMAP = K / (z + 1)
javascript(url=url)
Math(r'\hat{s}_\text{MAP} = ' + str(sMAP))
# <SOL>
def PP(s, z, K):
    """Posterior density of S at s: a Gamma(K+1, scale=1/(z+1)) pdf.

    Evaluated in log-space for numerical stability. lgamma(K+1) == log(K!),
    replacing the original explicit np.sum(np.log(range(1, K+1))), which is
    both slower and less accurate for large K (and equals 0 for K == 0,
    matching the empty sum).
    """
    from math import lgamma  # local import: exact log-factorial without overflow
    logPP = K * np.log(s) + (K + 1) * np.log(z + 1) - s * (z + 1) - lgamma(K + 1)
    return np.exp(logPP)
s = np.linspace(1, 25, 1000)
plt.plot(s, PP(s, z, K), label='Posterior: $p_{S|\mathbf{X}}(s|\mathbf{x})$')
plt.plot(s, np.exp(-s), label='Prior $p_{S}(s)$')
plt.stem([sMAP], [PP(sMAP, z, K)])
plt.xlim((s[0], s[-1]))
plt.ylim((0, plt.ylim()[1]))
plt.xlabel('$s$')
plt.legend(loc='upper left')
plt.show()
# </SOL>
# sMSE = <FILL IN>
sMSE = (K + 1) / (z + 1)
javascript(url=url)
Math(r'\hat{s}_\text{MSE} = ' + str(sMSE))
print("The minimum MSE is given by ")
# mMSE = <FILL IN>
mMSE = (K+1)/(z+1)**2
javascript(url=url)
Math(r'\text{MSE} = \frac{K+1}{(z +1)^2} = ' + str(mMSE) )
from scipy.stats import gamma
# <SOL>
sc = 1/(z+1)
upper = sMSE + 2*np.sqrt(mMSE)
lower = sMSE - 2*np.sqrt(mMSE)
p = 1 - (gamma.cdf(upper, K+1, scale=sc) - gamma.cdf(lower, K+1, scale=sc))
# </SOL>
javascript(url=url)
Math(r'P\left\{\hat{s}_\text{MSE} - 2 \sqrt{v_\text{MSE}} \le S \le ' +
r'\hat{s}_\text{MSE} + 2 \sqrt{v_\text{MSE}}\right\} = ' + str(p) )
# <SOL>
plt.figure(figsize=(13,3))
plt.plot(t[:-1], x_all)
plt.xlim([0, np.max(t)])
plt.ylim([0, 1.05*np.max(x_all)])
plt.xlabel('Time of the incoming call (hours)')
plt.ylabel('Time between calls (hours)')
plt.show()
# </SOL>
# <SOL>
z24 = np.zeros(24)
K24 = np.zeros(24).astype(int)
h2 = np.fix(t) % 24
hx = list(zip(h2, x_all))
for h in range(24):
x24 = np.array([u[1] for u in hx if u[0] == h])
z24[h] = np.sum(x24)
K24[h] = x24.shape[0]
# </SOL>
# Check if your variables are ok.
# (Note that his is not a full test. Passing it does not
# guarantee that variables have been correctly computed)
if np.sum(K24) == len(x_all):
print("Test for variable K passed.")
else:
print("Error in variable K.")
if np.sum(z24) == np.sum(x_all):
print("Test for variable z passed.")
else:
print("Error in variable z.")
for h in range(24):
x24 = x_all[h==h2[:-1]]
z24[h] = np.sum(x24)
K24[h] = x24.shape[0]
# </SOL>
# Check if your variables are ok.
# (Note that his is not a full test. Passing it does not
# guarantee that variables have been correctly computed)
if np.sum(K24) == len(x_all):
print("Test for variable K passed.")
else:
print("Error in variable K.")
if np.sum(z24) == np.sum(x_all):
print("Test for variable z passed.")
else:
print("Error in variable z.")
# <SOL>
sML24 = K24 / z24
sMSE24 = (K24+1) / (z24+1)
plt.plot(sML24, ':.', label='sML')
plt.plot(sMSE24, ':.', c='red', label='sMSE')
plt.xlabel('Hour')
plt.ylabel('s')
plt.xlim(0, 23)
plt.legend(loc='upper left')
plt.show()
# </SOL>
# <SOL>
# Maximum log-likelihood per each hour
Lp24 = [Lp(sML24[h], z24[h], K24[h]) for h in range(24)]
# Overall maximum likelihood
Lmax24 = np.sum(Lp24)
# </SOL>
print('Maximum log-likelihood of the simple model: {}'.format(Lmax))
print('Maximum log-likelihood of the hour-dependent model: {}'.format(Lmax24))
# <SOL>
s = np.linspace(1, 50, 1000)
sMAP24 = K24 / (z24 + 1)
plt.figure()
for h in np.arange(0, 24, 4):
plt.plot(s, PP(s, z24[h], K24[h]), label='h = {}'.format(h))
plt.plot(sMAP24[h], PP(sMAP24[h], z24[h], K24[h]),'*r')
plt.plot(sMSE24[h], PP(sMSE24[h], z24[h], K24[h]),'*g')
#plt.plot(s, np.exp(-s), label='Prior $p_{S}(s)$')
plt.xlim((s[0], s[-1]))
plt.ylim((0, plt.ylim()[1]))
plt.legend()
plt.show()
# </SOL>
print('sMSE24[0] = {}'.format(sMSE24[0]))
print('sMSE24[8] = {}'.format(sMSE24[8]))
print('sMSE24[16] = {}'.format(sMSE24[16]))
# <SOL>
tx = list(zip(t, x_all))
h0 = 16
z5 = np.zeros(n_days)
K5 = np.zeros(n_days).astype(int)
for d in range(5):
x5 = np.array([u[1] for u in tx if u[0] < 24*(d+1) and np.fix(u[0]) % 24 == h0])
z5[d] = np.sum(x5)
K5[d] = x5.shape[0]
mmse5 = (K5+1)/(z5+1)**2
plt.figure()
plt.stem(mmse5)
plt.ylim(0, plt.ylim()[1])
plt.show()
# </SOL>
# <SOL>
tx = list(zip(t, x_all))
h0 = 16
plt.figure()
for d in range(n_days):
plt.plot(s, PP(s, z5[d], K5[d]), label='d = {}'.format(d))
plt.xlim((s[0], s[-1]))
plt.ylim((0, plt.ylim()[1]))
plt.legend()
plt.show()
# </SOL>
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
x = [[[[1, 2, 3], [2, 3, 4], [0, 0, 0]],
[[1, 2, 3], [2, 0, 4], [3, 4, 5]],
[[1, 2, 3], [0, 0, 0], [0, 0, 0]],
[[1, 2, 3], [1, 2, 3], [0, 0, 0]]],
[[[1, 2, 3], [0, 1, 0], [0, 0, 0]],
[[1, 2, 3], [2, 3, 4], [0, 0, 0]],
[[1, 2, 3], [0, 0, 0], [0, 0, 0]],
[[1, 2, 3], [1, 2, 3], [1, 2, 3]]]]
x = tf.convert_to_tensor(x, dtype=tf.float32)
def g(x):
    """Mean over axis -2, counting only non-zero entries at each position."""
    # 1.0 where the element is non-zero, 0.0 elsewhere (float so it can be summed).
    non_zero = tf.cast(x != 0, tf.float32)
    # Element-wise sum / count of non-zero contributors.
    # NOTE(review): zeros *inside* otherwise-valid rows are excluded too, and a
    # position that is zero in every row yields 0/0 -> NaN — confirm this matches
    # the intended padding convention (all-zero rows as padding).
    y = tf.reduce_sum(x, axis=-2) / tf.reduce_sum(non_zero, axis=-2)
    return y
result = g(x.__copy__())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The zero pitch value correspond to unvoiced audio segments with a very low pitch confidence according to the algorithm's estimation. You can force estimations on those as well by setting the guessUnvoiced parameter.
Step2: Note segmentation and converting to MIDI
Step3: We can now export results to a MIDI file. We will use the mido Python package (which you can install with pip install mido) to generate the .mid file. You can test the result using the generated .mid file in a DAW.
|
<ASSISTANT_TASK:>
Python Code:
# For embedding audio player
import IPython
# Plots
import matplotlib.pyplot as plt
from pylab import plot, show, figure, imshow
plt.rcParams['figure.figsize'] = (15, 6)
import numpy
import essentia.standard as es
audiofile = '../../../test/audio/recorded/flamenco.mp3'
# Load audio file.
# It is recommended to apply equal-loudness filter for PredominantPitchMelodia.
loader = es.EqloudLoader(filename=audiofile, sampleRate=44100)
audio = loader()
print("Duration of the audio sample [sec]:")
print(len(audio)/44100.0)
# Extract the pitch curve
# PitchMelodia takes the entire audio signal as input (no frame-wise processing is required).
pitch_extractor = es.PredominantPitchMelodia(frameSize=2048, hopSize=128)
pitch_values, pitch_confidence = pitch_extractor(audio)
# Pitch is estimated on frames. Compute frame time positions.
pitch_times = numpy.linspace(0.0,len(audio)/44100.0,len(pitch_values) )
# Plot the estimated pitch contour and confidence over time.
f, axarr = plt.subplots(2, sharex=True)
axarr[0].plot(pitch_times, pitch_values)
axarr[0].set_title('estimated pitch [Hz]')
axarr[1].plot(pitch_times, pitch_confidence)
axarr[1].set_title('pitch confidence')
plt.show()
IPython.display.Audio(audiofile)
from mir_eval.sonify import pitch_contour
from tempfile import TemporaryDirectory
temp_dir = TemporaryDirectory()
# Essentia operates with float32 ndarrays instead of float64, so let's cast it.
synthesized_melody = pitch_contour(pitch_times, pitch_values, 44100).astype(numpy.float32)[:len(audio)]
es.AudioWriter(filename=temp_dir.name + 'flamenco_melody.mp3', format='mp3')(es.StereoMuxer()(audio, synthesized_melody))
IPython.display.Audio(temp_dir.name + 'flamenco_melody.mp3')
onsets, durations, notes = es.PitchContourSegmentation(hopSize=128)(pitch_values, audio)
print("MIDI notes:", notes) # Midi pitch number
print("MIDI note onsets:", onsets)
print("MIDI note durations:", durations)
import mido
PPQ = 96 # Pulses per quarter note.
BPM = 120 # Assuming a default tempo in Ableton to build a MIDI clip.
tempo = mido.bpm2tempo(BPM) # Microseconds per beat.
# Compute onsets and offsets for all MIDI notes in ticks.
# Relative tick positions start from time 0.
offsets = onsets + durations
silence_durations = list(onsets[1:] - offsets[:-1]) + [0]
mid = mido.MidiFile()
track = mido.MidiTrack()
mid.tracks.append(track)
for note, onset, duration, silence_duration in zip(list(notes), list(onsets), list(durations), silence_durations):
track.append(mido.Message('note_on', note=int(note), velocity=64,
time=int(mido.second2tick(duration, PPQ, tempo))))
track.append(mido.Message('note_off', note=int(note),
time=int(mido.second2tick(silence_duration, PPQ, tempo))))
midi_file = temp_dir.name + '/extracted_melody.mid'
mid.save(midi_file)
print("MIDI file location:", midi_file)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run Beam pipeline locally
Step2: Display preprocessing data
Step3: Train the model
Step4: Predictions
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
print(tf.version.VERSION)
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
!cat run_dataflow.sh
!./run_dataflow.sh > /dev/null 2>&1
!ls -l flower_tftransform/
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
IMG_HEIGHT = 448
IMG_WIDTH = 448
IMG_CHANNELS = 3
CLASS_NAMES = 'daisy dandelion roses sunflowers tulips'.split()
ds = tf.data.experimental.make_batched_features_dataset(
'./flower_tftransform/train-00000-of-00016.gz',
batch_size=5,
features = {
'image': tf.io.FixedLenFeature([IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS], tf.float32),
'label': tf.io.FixedLenFeature([], tf.string),
'label_int': tf.io.FixedLenFeature([], tf.int64)
},
reader=lambda filenames: tf.data.TFRecordDataset(filenames, compression_type='GZIP')
)
for feats in ds.take(1):
print(feats['image'].shape)
f, ax = plt.subplots(1, 5, figsize=(15,15))
for feats in ds.take(1):
for idx in range(5): # batchsize
ax[idx].imshow((feats['image'][idx].numpy()));
ax[idx].set_title(feats['label'][idx].numpy())
ax[idx].axis('off')
# Helper functions
def training_plot(metrics, history):
    """Draw one subplot per metric: training curve (dashed) plus validation curve."""
    n_panels = len(metrics)
    fig, ax = plt.subplots(1, n_panels, figsize=(5 * n_panels, 5))
    for idx in range(n_panels):
        metric = metrics[idx]
        val_metric = 'val_' + metric
        # Plot order matters: the legend labels are matched positionally.
        ax[idx].plot(history.history[metric], ls='dashed')
        ax[idx].set_xlabel("Epochs")
        ax[idx].set_ylabel(metric)
        ax[idx].plot(history.history[val_metric])
        ax[idx].legend([metric, val_metric])
import tensorflow_hub as hub
import os
# Load compressed models from tensorflow_hub
os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'
def create_preproc_dataset(pattern, batch_size):
    """Read gzip-compressed TFRecord files matching `pattern` and yield
    (image, integer-label) batches for exactly one epoch.

    Relies on module-level IMG_HEIGHT / IMG_WIDTH / IMG_CHANNELS for the
    fixed image shape written by the preprocessing pipeline.
    """
    return tf.data.experimental.make_batched_features_dataset(
        pattern,
        batch_size=batch_size,
        # Feature schema of the preprocessed records: dense float image,
        # string class name, and the integer class id.
        features = {
            'image': tf.io.FixedLenFeature([IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS], tf.float32),
            'label': tf.io.FixedLenFeature([], tf.string),
            'label_int': tf.io.FixedLenFeature([], tf.int64)
        },
        # Files are gzip-compressed, so the default TFRecord reader is overridden.
        reader=lambda filenames: tf.data.TFRecordDataset(filenames, compression_type='GZIP'),
        num_epochs=1
    ).map(
        # Keep only what training needs: the pixels and the integer label.
        lambda x: (x['image'], x['label_int'])
    )
# parameterize to the values in the previous cell
# WARNING! training on a small subset dataset (note top_dir)
def train_and_evaluate(top_dir='./flower_tftransform',
                       batch_size = 32,
                       lrate = 0.001,
                       l1 = 0.,
                       l2 = 0.,
                       num_hidden = 16):
    """Train a transfer-learning flower classifier and plot its curves.

    Builds a frozen MobileNet-v2 feature extractor (TF Hub) topped with a
    small dense head, trains for 3 epochs on the preprocessed records under
    `top_dir`, plots loss/accuracy via `training_plot`, and returns the
    fitted Keras model.
    """
    # One shared L1/L2 penalty applied to both trainable Dense layers.
    regularizer = tf.keras.regularizers.l1_l2(l1, l2)

    train_dataset = create_preproc_dataset(os.path.join(top_dir, 'train-*'), batch_size)
    eval_dataset = create_preproc_dataset(os.path.join(top_dir, 'valid-*'), batch_size)

    layers = [
        # Crop to the central half (448 // 2 = 224 per side given the
        # module-level IMG_HEIGHT/IMG_WIDTH of 448).
        tf.keras.layers.experimental.preprocessing.CenterCrop(
            height=IMG_HEIGHT//2, width=IMG_WIDTH//2,
            input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS),
        ),
        # Pretrained feature extractor, kept frozen (trainable=False).
        hub.KerasLayer(
            "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4",
            trainable=False,
            name='mobilenet_embedding'),
        tf.keras.layers.Dense(num_hidden,
                              kernel_regularizer=regularizer,
                              activation=tf.keras.activations.relu,
                              name='dense_hidden'),
        # Softmax head over the five flower classes.
        tf.keras.layers.Dense(len(CLASS_NAMES),
                              kernel_regularizer=regularizer,
                              activation='softmax',
                              name='flower_prob')
    ]
    model = tf.keras.Sequential(layers, name='flower_classification')

    # Sparse loss: labels are integer ids, not one-hot vectors.
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lrate),
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(
                      from_logits=False),
                  metrics=['accuracy'])
    print(model.summary())
    history = model.fit(train_dataset, validation_data=eval_dataset, epochs=3)
    training_plot(['loss', 'accuracy'], history)
    return model
model = train_and_evaluate()
!saved_model_cli show --all --dir ./flower_tftransform/tft/transform_fn
# get some files to do inference on.
filenames = [
'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9818247_e2eac18894.jpg',
'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9853885425_4a82356f1d_m.jpg',
'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/98992760_53ed1d26a9.jpg',
'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9939430464_5f5861ebab.jpg',
'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9965757055_ff01b5ee6f_n.jpg'
]
img_bytes = [
tf.io.read_file(filename) for filename in filenames
]
label = [
'n/a' for filename in filenames
] # not used in inference
label_int = [
-1 for filename in filenames
] # not used in inference
# by calling the preproc function, we get images of the right size & crop
preproc = tf.keras.models.load_model('./flower_tftransform/tft/transform_fn').signatures['transform_signature']
preprocessed = preproc(img_bytes=tf.convert_to_tensor(img_bytes),
label=tf.convert_to_tensor(label, dtype=tf.string),
label_int=tf.convert_to_tensor(label_int, dtype=tf.int64))
# then we call model.predict() and take the argmx of the result
pred_label_index = tf.math.argmax(model.predict(preprocessed)).numpy()
print(pred_label_index)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1 Effect of the variation of the static parameters
Step2: 1.1 Constant energy $\mathcal{H}$, varying force magnitude $F$
Step3: 1.2 Constant magnitude $F$, varying energy $\mathcal{H}$
Step4: 1.2.2 $\mathcal{H} > |F|$
Step5: 2. Relation between total energy, force and end node distance
Step6: 2.2 For varying F
Step7: 3. Relation between rotation and transversal displacements
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
def getOrbitPoints(EI, H, F, nVertex):
    """Sample (N, Q, M) along the elastica's phase-space orbit.

    Returns the opening angle theta0 together with nVertex samples of the
    normal force N, shear Q and bending moment M, for bending stiffness
    EI, energy level H and axial force magnitude F.
    """
    # Full sweep (theta0 = pi) when |F| <= H or F is effectively zero;
    # otherwise the orbit closes where cos(theta0) = H/F.
    if abs(F) <= H or abs(F) < 1.E-8:
        theta0 = np.pi
    else:
        theta0 = np.arccos(H / F)

    angles = np.linspace(theta0, -theta0, nVertex)
    N = F * np.cos(angles)
    Q = -F * np.sin(angles)
    # Moment from the energy relation; force M to zero where H == N to
    # avoid taking sqrt of a tiny negative round-off residue.
    M = np.array([0. if abs(H - Nk) < 1E-8 else -np.sqrt(2 * EI * (H - Nk))
                  for Nk in N])
    return theta0, N, Q, M
def computeElastica(theta0, Q, M, nEdges, nVertex):
    """Reconstruct the elastica curve from its internal-force samples.

    Given the opening angle `theta0` and the shear/moment samples `Q`, `M`
    produced by getOrbitPoints, returns (phi0, length, gamma): the initial
    tangent angle, the total rod length, and the vertex positions encoded
    as complex numbers gamma = x + i*y.

    NOTE(review): reads the module-level global `EI` (bending stiffness)
    instead of taking it as a parameter -- make sure EI is set before calling.
    """
    # Edge lengths recovered from the discrete moment/shear relation
    # (trapezoidal form of dM = -Q ds along each edge).
    h = np.zeros(nEdges)
    h[:] = -2*(M[1:] - M[0:-1])/(Q[0:-1] + Q[1:])
    length = np.sum(h)
    # Discrete curvature at each vertex via the constitutive law M = EI * kappa.
    kappa = M / EI
    # Turning angle at each vertex; phi[0] additionally carries theta0.
    phi = np.zeros(nVertex-1)
    rotor = np.zeros(nVertex-1) + 1j*np.zeros(nVertex-1)
    phi[0] = theta0 + np.arctan(h[0]*kappa[0]/2)
    phi[1:] = 2.*np.arctan(kappa[1:-1]*(h[0:-1] + h[1:])/4.)
    # Unit complex rotations mapping each edge direction to the next one.
    rotor[0] = np.exp(1j*phi[0])
    rotor[1:] = (4./(h[0:-1] + h[1:]) + 1j*kappa[1:-1])/(4./(h[0:-1] + h[1:]) - 1j * kappa[1:-1])
    # Integrate the rotated edge vectors to get vertex positions, starting
    # from the origin.
    gamma = np.zeros(nVertex) + 1j*np.zeros(nVertex)
    gamma[0] = 0.+0j
    gamma[1] = gamma[0] + h[0]*rotor[0]
    for n in range(1, nVertex-1):
        gamma[n+1] = gamma[n] + h[n]/h[n-1] * (gamma[n] - gamma[n-1]) * rotor[n]
    return phi[0], length, gamma
EI = 5000. #kN m^2
H = 5000. #kN m/m
F = np.arange(-5500., -10500., -500.) #kN
nEdges = 500
nVertex = nEdges + 1
N = np.zeros((len(F), nVertex))
Q = np.zeros((len(F), nVertex))
M = np.zeros((len(F), nVertex))
th0 = np.zeros(len(F))
length = []
gamma = []
for i in range(len(F)):
(th0[i], N[i], Q[i], M[i]) = getOrbitPoints(EI, H, F[i], nVertex)
(ph0, l, g) = computeElastica(th0[i], Q[i], M[i], nEdges, nVertex)
length.append(l)
gamma.append(g)
fig = plt.figure(figsize=(9,9))
ax = fig.gca(aspect='equal')
for i in range(len(F)):
ax.plot(gamma[i].real, gamma[i].imag)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
fig = plt.figure(figsize=(9,9))
ax = fig.gca(projection='3d')
for i in range(len(F)):
ax.plot(N[i], Q[i], M[i])
ax.set_xlabel('$N$')
ax.set_ylabel('$Q$')
ax.set_zlabel('$M$')
EI = 5000. #kN m^2
H = np.arange(-4500., 4500., 500.) #kN m/m
F = -5000. #kN
nEdges = 5000
nVertex = nEdges + 1
N = np.zeros((len(H), nVertex))
Q = np.zeros((len(H), nVertex))
M = np.zeros((len(H), nVertex))
th0 = np.zeros(len(H))
length = []
gamma = []
for i in range(len(H)):
(th0[i], N[i], Q[i], M[i]) = getOrbitPoints(EI, H[i], F, nVertex)
(ph0, l, g) = computeElastica(th0[i], Q[i], M[i], nEdges, nVertex)
length.append(l)
gamma.append(g)
print(length)
fig = plt.figure(figsize=(9,9))
ax = fig.gca(aspect='equal')
for i in range(len(H)):
ax.plot(gamma[i].real, gamma[i].imag)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
fig = plt.figure(figsize=(9,9))
ax = fig.gca(projection='3d')
for i in range(len(H)):
ax.plot(N[i], Q[i], M[i])
ax.set_xlabel('$N$')
ax.set_ylabel('$Q$')
ax.set_zlabel('$M$')
EI = 5000. #kN m^2
H = np.arange(5500., 23500., 1000.) #kN m/m
F = -5000. #kN
nEdges = 5000
nVertex = nEdges + 1
N = np.zeros((len(H), nVertex))
Q = np.zeros((len(H), nVertex))
M = np.zeros((len(H), nVertex))
th0 = np.zeros(len(H))
length = []
gamma = []
for i in range(len(H)):
(th0[i], N[i], Q[i], M[i]) = getOrbitPoints(EI, H[i], F, nVertex)
(ph0, l, g) = computeElastica(th0[i], Q[i], M[i], nEdges, nVertex)
length.append(l)
gamma.append(g)
print(length)
fig = plt.figure(figsize=(9,9))
ax = fig.gca(aspect='equal')
for i in range(len(H)):
ax.plot(gamma[i].real, gamma[i].imag)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
fig = plt.figure(figsize=(9,9))
ax = fig.gca(projection='3d')
for i in range(len(H)):
ax.plot(N[i], Q[i], M[i])
ax.set_xlabel('$N$')
ax.set_ylabel('$Q$')
ax.set_zlabel('$M$')
EI = 5000. #kN m^2
Hinf = np.arange(-4950., 4950., 50.) #kN m/m
Hsup = np.arange(5050., 14950., 50)
F = -5000. #kN
nEdges = 5000
nVertex = nEdges + 1
Ni = np.zeros((len(Hinf), nVertex))
Qi = np.zeros((len(Hinf), nVertex))
Mi = np.zeros((len(Hinf), nVertex))
th0i = np.zeros(len(Hinf))
ph0i = []
gammaInf = []
for i in range(len(Hinf)):
(th0i[i], Ni[i], Qi[i], Mi[i]) = getOrbitPoints(EI, Hinf[i], F, nVertex)
(ph0, l, g) = computeElastica(th0i[i], Qi[i], Mi[i], nEdges, nVertex)
gammaInf.append(g)
ph0i.append(ph0)
Ns = np.zeros((len(Hsup), nVertex))
Qs = np.zeros((len(Hsup), nVertex))
Ms = np.zeros((len(Hsup), nVertex))
th0s = np.zeros(len(Hsup))
ph0s = []
gammaSup = []
for i in range(len(Hsup)):
(th0s[i], Ns[i], Qs[i], Ms[i]) = getOrbitPoints(EI, Hsup[i], F, nVertex)
(ph0, l, g) = computeElastica(th0s[i], Qs[i], Ms[i], nEdges, nVertex)
gammaSup.append(g)
ph0s.append(ph0)
distInf = np.zeros(len(Hinf))
for i in range(len(Hinf)):
distInf[i] = gammaInf[i][nVertex-1].real
distSup = np.zeros(len(Hsup))
for i in range(len(Hsup)):
distSup[i] = gammaSup[i][nVertex-1].real
fig, (ax0, ax1) = plt.subplots(nrows=2, figsize=(8, 16))
#ax = fig.gca()
ax0.plot(Hinf, distInf, 'b')
ax0.plot(Hsup, distSup, 'b')
ax0.grid()
ax0.axvline(x=5000., color=(0.5,0.5,0.5))
ax0.axhline(color=(0.5,0.5,0.5))
ax0.set_xlabel('$\mathcal{H}$')
ax0.set_ylabel('$d$')
ax1.plot(Hinf, ph0i, 'b')
ax1.plot(Hsup, ph0s, 'b')
ax1.grid()
ax1.axvline(x=5000., color=(0.5,0.5,0.5))
#ax1.axhline(color=(0.5,0.5,0.5))
#ax1.set_xlabel('$\mathcal{H}$')
ax1.set_ylabel('$theta_0$')
distInf[0]
np.pi*np.sqrt(EI/abs(F))
EI = 5000. #kN m^2
fig, ax0 = plt.subplots(figsize=(9, 9))
ax0.grid()
ax0.axvline(x=1., color=(0.5,0.5,0.5))
ax0.axhline(color=(0.5,0.5,0.5))
ax0.set_xlabel(r'$-\mathcal{H}/F$')
ax0.set_ylim(-1.5, 1.5)
ax0.set_ylabel(r'$d/d_0$')
ax1 = ax0.twinx()
ax1.set_ylim(-1.5, 1.5)
ax1.set_ylabel(r'$\theta_0/\pi$')
F = np.arange(-5000., -1000., 1000.) #kN
nEdges = 1000
nVertex = nEdges + 1
for k in range(len(F)):
Hinf = np.arange(F[k]+50., -F[k]-50, 100.) #kN m/m
Hsup = np.arange(-F[k]+50, 5950., 100)
Ni = np.zeros((len(Hinf), nVertex))
Qi = np.zeros((len(Hinf), nVertex))
Mi = np.zeros((len(Hinf), nVertex))
th0i = np.zeros(len(Hinf))
ph0i = []
gammaInf = []
for i in range(len(Hinf)):
(th0i[i], Ni[i], Qi[i], Mi[i]) = getOrbitPoints(EI, Hinf[i], F[k], nVertex)
(ph0, l, g) = computeElastica(th0i[i], Qi[i], Mi[i], nEdges, nVertex)
gammaInf.append(g)
ph0i.append(ph0)
Ns = np.zeros((len(Hsup), nVertex))
Qs = np.zeros((len(Hsup), nVertex))
Ms = np.zeros((len(Hsup), nVertex))
th0s = np.zeros(len(Hsup))
ph0s = []
gammaSup = []
for i in range(len(Hsup)):
(th0s[i], Ns[i], Qs[i], Ms[i]) = getOrbitPoints(EI, Hsup[i], F[k], nVertex)
(ph0, l, g) = computeElastica(th0s[i], Qs[i], Ms[i], nEdges, nVertex)
gammaSup.append(g)
ph0s.append(ph0)
distInf = np.zeros(len(Hinf))
for i in range(len(Hinf)):
distInf[i] = gammaInf[i][nVertex-1].real
distSup = np.zeros(len(Hsup))
for i in range(len(Hsup)):
distSup[i] = gammaSup[i][nVertex-1].real
ph0i = np.asarray(ph0i)
ph0s = np.asarray(ph0s)
#print(k)
#print(ph0i)
#print(ph0s)
ax0.plot(-Hinf/F[k], distInf/gammaInf[0][nVertex-1].real, 'b')
ax0.plot(-Hsup/F[k], distSup/gammaInf[0][nVertex-1].real, 'b')
ax1.plot(-Hinf/F[k], ph0i/np.pi, 'r')
ax1.plot(-Hsup/F[k], ph0s/np.pi, 'r')
fig, ax = plt.subplots(figsize=(9, 9))
ax.grid()
mHdivF = np.arange(-1., 1., 0.01)
vdivd0 = np.sqrt(2*(1 + mHdivF))/np.pi
ax.plot(mHdivF, vdivd0, 'b')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Feel free to adjust and experiment with these parameters after you have completed the lab
Step2: Convenience Functions
Step3: The Assignment
Step4: Go ahead and drop any row with a nan
Step5: In the future, you might try setting the nan values to the mean value of that column, the mean should only be calculated for the specific class rather than across all classes, now that you have the labels.
Step6: Split your data into a test and train set. Your test size should be 30% with random_state 7. Please use variable names
Step7: Create an SVC classifier named svc and use a linear kernel. You already have C defined at the top of the lab, so just set C=C.
Step8: Create an KNeighbors classifier named knn and set the neighbor count to 5
Step9: Fire it Up
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import time
C = 1
kernel = 'linear'
# TODO: Change to 200000 once you get to Question#2
iterations = 5000
# You can set this to false if you want to draw the full square matrix:
FAST_DRAW = True
def drawPlots(model, X_train, X_test, y_train, y_test, wintitle='Figure 1'):
    """Re-fit `model` on every 2-feature pair and draw its decision surface.

    For each pair (row, col) of DataFrame columns, the classifier is fit on
    just those two features, its decision regions are contour-plotted with
    the training points overlaid, and the 2D test score is annotated on the
    subplot.  Prints the best 2D score found.

    NOTE(review): relies on module-level `FAST_DRAW`, `plt`, `mpl`, `np`.
    """
    # You can use this to break any higher-dimensional space down,
    # and view cross sections of it.

    # If this line throws an error, use plt.style.use('ggplot') instead
    mpl.style.use('ggplot')  # Look Pretty

    padding = 3
    resolution = 0.5
    max_2d_score = 0

    y_colors = ['#ff0000', '#00ff00', '#0000ff']
    my_cmap = mpl.colors.ListedColormap(['#ffaaaa', '#aaffaa', '#aaaaff'])
    colors = [y_colors[i] for i in y_train]
    num_columns = len(X_train.columns)

    fig = plt.figure()
    fig.canvas.set_window_title(wintitle)
    fig.set_tight_layout(True)

    cnt = 0
    for col in range(num_columns):
        for row in range(num_columns):
            # Easy out: with FAST_DRAW, only draw one triangle of the matrix.
            if FAST_DRAW and col > row:
                cnt += 1
                continue

            ax = plt.subplot(num_columns, num_columns, cnt + 1)
            plt.xticks(())
            plt.yticks(())

            # Diagonal cells just display the feature name.
            if col == row:
                plt.text(0.5, 0.5, X_train.columns[row],
                         verticalalignment='center',
                         horizontalalignment='center', fontsize=12)
                cnt += 1
                continue

            # Select the two features, then train the model on them.
            # `.iloc` replaces the `.ix` indexer, which was removed in
            # pandas 1.0 (positional selection is what is intended here).
            X_train_bag = X_train.iloc[:, [row, col]]
            X_test_bag = X_test.iloc[:, [row, col]]
            model.fit(X_train_bag, y_train)

            # Create a mesh to plot in.
            x_min, x_max = X_train_bag.iloc[:, 0].min() - padding, X_train_bag.iloc[:, 0].max() + padding
            y_min, y_max = X_train_bag.iloc[:, 1].min() - padding, X_train_bag.iloc[:, 1].max() + padding
            xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),
                                 np.arange(y_min, y_max, resolution))

            # Plot boundaries.
            plt.xlim(xx.min(), xx.max())
            plt.ylim(yy.min(), yy.max())

            # Predict over the mesh and draw the decision regions.
            Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
            Z = Z.reshape(xx.shape)
            plt.contourf(xx, yy, Z, cmap=my_cmap, alpha=0.8)
            plt.scatter(X_train_bag.iloc[:, 0], X_train_bag.iloc[:, 1], c=colors, alpha=0.5)

            score = round(model.score(X_test_bag, y_test) * 100, 3)
            plt.text(0.5, 0, "Score: {0}".format(score),
                     transform=ax.transAxes,
                     horizontalalignment='center', fontsize=8)
            max_2d_score = score if score > max_2d_score else max_2d_score

            cnt += 1

    print("Max 2D Score: ", max_2d_score)
def benchmark(model, X_train, X_test, y_train, y_test, wintitle='Figure 1'):
    """Time repeated training and scoring of `model` and print the results.

    Lab template: the two TODO sections are intentionally left for the
    student to fill in.  NOTE(review): `score` is never assigned in the
    template -- the student's scoring code must set it, otherwise the
    final print raises NameError.  `iterations` is a module-level constant.
    """
    print(wintitle + ' Results')
    s = time.time()
    for i in range(iterations):
        # TODO: train the classifier on the training data / labels:
        # .. your code here ..
    print("{0} Iterations Training Time: ".format(iterations), time.time() - s)

    s = time.time()
    for i in range(iterations):
        # TODO: score the classifier on the testing data / labels:
        # .. your code here ..
    print("{0} Iterations Scoring Time: ".format(iterations), time.time() - s)
    print("High-Dimensionality Score: ", round((score*100), 3))
# .. your code here ..
# An easy way to show which rows have nans in them:
X[pd.isnull(X).any(axis=1)]
# .. your code here ..
# .. your code here ..
# .. your code here ..
# .. your code here ..
# .. your code here ..
benchmark(knn, X_train, X_test, y_train, y_test, 'KNeighbors')
drawPlots(knn, X_train, X_test, y_train, y_test, 'KNeighbors')
benchmark(svc, X_train, X_test, y_train, y_test, 'SVC')
drawPlots(svc, X_train, X_test, y_train, y_test, 'SVC')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Keras
Step2: tf.keras can run any Keras-compatible code, but keep in mind
Step3: Configure the layers
Step4: Train and evaluate
Step5: tf.keras.Model.compile takes three important arguments
Step6: Input NumPy data
Step7: tf.keras.Model.fit takes three important arguments
Step8: Input tf.data datasets
Step9: Here, the fit method uses the steps_per_epoch argument—this is the number of
Step10: Evaluate and predict
Step11: And to predict the output of the last layer in inference for the data provided,
Step12: Build advanced models
Step13: Instantiate the model given inputs and outputs.
Step14: Model subclassing
Step15: Instantiate the new model class
Step16: Custom layers
Step17: Create a model using your custom layer
Step18: Callbacks
Step19: <a name='save_and_restore'></a>
Step20: By default, this saves the model's weights in the
Step21: Configuration only
Step22: Recreate the model (newly initialized) from the JSON
Step23: Caution
Step24: <a name='eager_execution'></a>
Step25: Note
Step26: Define an input pipeline. The input_fn returns a tf.data.Dataset object
Step27: Next, create a tf.estimator.RunConfig and set the train_distribute argument
Step28: Convert the Keras model to a tf.estimator.Estimator instance
Step29: Finally, train the Estimator instance by providing the input_fn and steps
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install pyyaml # Required to save models in YAML format
import tensorflow.compat.v1 as tf
from tensorflow.keras import layers
print(tf.version.VERSION)
print(tf.keras.__version__)
model = tf.keras.Sequential()
# Adds a densely-connected layer with 64 units to the model:
model.add(layers.Dense(64, activation='relu'))
# Add another:
model.add(layers.Dense(64, activation='relu'))
# Add a softmax layer with 10 output units:
model.add(layers.Dense(10, activation='softmax'))
# Create a sigmoid layer:
layers.Dense(64, activation='sigmoid')
# Or:
layers.Dense(64, activation=tf.sigmoid)
# A linear layer with L1 regularization of factor 0.01 applied to the kernel matrix:
layers.Dense(64, kernel_regularizer=tf.keras.regularizers.l1(0.01))
# A linear layer with L2 regularization of factor 0.01 applied to the bias vector:
layers.Dense(64, bias_regularizer=tf.keras.regularizers.l2(0.01))
# A linear layer with a kernel initialized to a random orthogonal matrix:
layers.Dense(64, kernel_initializer='orthogonal')
# A linear layer with a bias vector initialized to 2.0s:
layers.Dense(64, bias_initializer=tf.keras.initializers.constant(2.0))
model = tf.keras.Sequential([
# Adds a densely-connected layer with 64 units to the model:
layers.Dense(64, activation='relu', input_shape=(32,)),
# Add another:
layers.Dense(64, activation='relu'),
# Add a softmax layer with 10 output units:
layers.Dense(10, activation='softmax')])
model.compile(optimizer=tf.train.AdamOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Configure a model for mean-squared error regression.
model.compile(optimizer=tf.train.AdamOptimizer(0.01),
loss='mse', # mean squared error
metrics=['mae']) # mean absolute error
# Configure a model for categorical classification.
model.compile(optimizer=tf.train.RMSPropOptimizer(0.01),
loss=tf.keras.losses.categorical_crossentropy,
metrics=[tf.keras.metrics.categorical_accuracy])
import numpy as np
def random_one_hot_labels(shape):
    """Return a float array of shape (n, n_class) whose rows are random one-hot vectors."""
    num_rows, num_classes = shape
    # One random class id per row, drawn from [0, num_classes).
    drawn = np.random.randint(0, num_classes, num_rows)
    # Selecting rows of the identity matrix yields the one-hot encoding.
    return np.eye(num_classes)[drawn]
data = np.random.random((1000, 32))
labels = random_one_hot_labels((1000, 10))
model.fit(data, labels, epochs=10, batch_size=32)
import numpy as np
data = np.random.random((1000, 32))
labels = random_one_hot_labels((1000, 10))
val_data = np.random.random((100, 32))
val_labels = random_one_hot_labels((100, 10))
model.fit(data, labels, epochs=10, batch_size=32,
validation_data=(val_data, val_labels))
# Instantiates a toy dataset instance:
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
dataset = dataset.repeat()
# Don't forget to specify `steps_per_epoch` when calling `fit` on a dataset.
model.fit(dataset, epochs=10, steps_per_epoch=30)
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32).repeat()
val_dataset = tf.data.Dataset.from_tensor_slices((val_data, val_labels))
val_dataset = val_dataset.batch(32).repeat()
model.fit(dataset, epochs=10, steps_per_epoch=30,
validation_data=val_dataset,
validation_steps=3)
data = np.random.random((1000, 32))
labels = random_one_hot_labels((1000, 10))
model.evaluate(data, labels, batch_size=32)
model.evaluate(dataset, steps=30)
result = model.predict(data, batch_size=32)
print(result.shape)
inputs = tf.keras.Input(shape=(32,)) # Returns a placeholder tensor
# A layer instance is callable on a tensor, and returns a tensor.
x = layers.Dense(64, activation='relu')(inputs)
x = layers.Dense(64, activation='relu')(x)
predictions = layers.Dense(10, activation='softmax')(x)
model = tf.keras.Model(inputs=inputs, outputs=predictions)
# The compile step specifies the training configuration.
model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Trains for 5 epochs
model.fit(data, labels, batch_size=32, epochs=5)
class MyModel(tf.keras.Model):
    """Two-layer classifier built by subclassing tf.keras.Model."""

    def __init__(self, num_classes=10):
        super(MyModel, self).__init__(name='my_model')
        self.num_classes = num_classes
        # Layers are created once here and reused on every forward pass.
        self.dense_1 = layers.Dense(32, activation='relu')
        self.dense_2 = layers.Dense(num_classes, activation='sigmoid')

    def call(self, inputs):
        """Forward pass: hidden ReLU layer followed by the output layer."""
        hidden = self.dense_1(inputs)
        return self.dense_2(hidden)

    def compute_output_shape(self, input_shape):
        """Report the output shape.

        Only required if the subclassed model is used inside a
        functional-style model; otherwise overriding this is optional.
        """
        out_shape = tf.TensorShape(input_shape).as_list()
        out_shape[-1] = self.num_classes
        return tf.TensorShape(out_shape)
model = MyModel(num_classes=10)
# The compile step specifies the training configuration.
model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Trains for 5 epochs.
model.fit(data, labels, batch_size=32, epochs=5)
class MyLayer(layers.Layer):
    """Custom layer: multiplies its input by a learned kernel (no bias)."""

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        """Create the trainable kernel once the input width is known."""
        kernel_shape = tf.TensorShape((input_shape[1], self.output_dim))
        self.kernel = self.add_weight(name='kernel',
                                      shape=kernel_shape,
                                      initializer='uniform',
                                      trainable=True)
        # Make sure to call the `build` method at the end.
        super(MyLayer, self).build(input_shape)

    def call(self, inputs):
        """Apply the linear transform (no bias, no activation)."""
        return tf.matmul(inputs, self.kernel)

    def compute_output_shape(self, input_shape):
        """Same shape as the input except the last axis becomes output_dim."""
        dims = tf.TensorShape(input_shape).as_list()
        dims[-1] = self.output_dim
        return tf.TensorShape(dims)

    def get_config(self):
        """Serialize the layer: base config plus our single constructor arg."""
        config = super(MyLayer, self).get_config()
        config['output_dim'] = self.output_dim
        return config

    @classmethod
    def from_config(cls, config):
        """Rebuild the layer from a config produced by get_config."""
        return cls(**config)
model = tf.keras.Sequential([
MyLayer(10),
layers.Activation('softmax')])
# The compile step specifies the training configuration
model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Trains for 5 epochs.
model.fit(data, labels, batch_size=32, epochs=5)
callbacks = [
# Interrupt training if `val_loss` stops improving for over 2 epochs
tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),
# Write TensorBoard logs to `./logs` directory
tf.keras.callbacks.TensorBoard(log_dir='./logs')
]
model.fit(data, labels, batch_size=32, epochs=5, callbacks=callbacks,
validation_data=(val_data, val_labels))
model = tf.keras.Sequential([
layers.Dense(64, activation='relu', input_shape=(32,)),
layers.Dense(10, activation='softmax')])
model.compile(optimizer=tf.train.AdamOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Save weights to a TensorFlow Checkpoint file
model.save_weights('./weights/my_model')
# Restore the model's state,
# this requires a model with the same architecture.
model.load_weights('./weights/my_model')
# Save weights to a HDF5 file
model.save_weights('my_model.h5', save_format='h5')
# Restore the model's state
model.load_weights('my_model.h5')
# Serialize a model to JSON format
json_string = model.to_json()
json_string
import json
import pprint
pprint.pprint(json.loads(json_string))
fresh_model = tf.keras.models.model_from_json(json_string)
# Create a trivial model
model = tf.keras.Sequential([
layers.Dense(64, activation='relu', input_shape=(32,)),
layers.Dense(10, activation='softmax')
])
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, batch_size=32, epochs=5)
# Save entire model to a HDF5 file
model.save('my_model.h5')
# Recreate the exact same model, including weights and optimizer.
model = tf.keras.models.load_model('my_model.h5')
model = tf.keras.Sequential([layers.Dense(64, activation='relu', input_shape=(32,)),
layers.Dense(10,activation='softmax')])
model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
estimator = tf.keras.estimator.model_to_estimator(model)
model = tf.keras.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10,)))
model.add(layers.Dense(1, activation='sigmoid'))
optimizer = tf.train.GradientDescentOptimizer(0.2)
model.compile(loss='binary_crossentropy', optimizer=optimizer)
model.summary()
def input_fn():
    """Build a batched, repeated tf.data pipeline of random binary-classification data."""
    features = np.random.random((1024, 10))
    targets = np.random.randint(2, size=(1024, 1))
    # Cast to float32 so the dataset matches the model's expected dtype.
    features = tf.cast(features, tf.float32)
    dataset = tf.data.Dataset.from_tensor_slices((features, targets))
    return dataset.repeat(10).batch(32)
strategy = tf.distribute.MirroredStrategy()
config = tf.estimator.RunConfig(train_distribute=strategy)
keras_estimator = tf.keras.estimator.model_to_estimator(
keras_model=model,
config=config,
model_dir='/tmp/model_dir')
keras_estimator.train(input_fn=input_fn, steps=10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The number of Markov chains and the number of steps each Markov chain is sampling has to be defined, as well as the tune_interval and the number of processors to be used in the parallel sampling. In this very simple example using only one processor is faster than forking the interpreter. However, if the calculation cost of the model increases it becomes more efficient to use many processors.
Step2: Define the number of dimensions for the multivariate gaussians, their weights and the covariance matrix.
Step3: The PyMC3 model. Note that we are making two gaussians, where one has w1 (90%) of the mass
Step4: Note
Step5: Finally, the sampling is executed
Step6: Note
Step7: Finally, we delete the sampling result folder. This folder may occupy significant disk space (gigabytes), depending on the number of sampling parameters for complex models, so we advise the user to check in advance that there is enough free space on the disk.
|
<ASSISTANT_TASK:>
Python Code:
import pymc3 as pm
import numpy as np
from pymc3.step_methods import smc
import theano.tensor as tt
from matplotlib import pyplot as plt
from tempfile import mkdtemp
import shutil
%matplotlib inline
test_folder = mkdtemp(prefix='ATMIP_TEST')
n_chains = 500
n_steps = 100
tune_interval = 25
n_jobs = 1
n = 4
mu1 = np.ones(n) * (1. / 2)
mu2 = -mu1
stdev = 0.1
sigma = np.power(stdev, 2) * np.eye(n)
isigma = np.linalg.inv(sigma)
dsigma = np.linalg.det(sigma)
w1 = 0.1
w2 = (1 - w1)
def two_gaussians(x):
    """Symbolic (Theano) log-density of a two-component Gaussian mixture at `x`.

    Used as the PyMC3 likelihood.  Reads module-level globals: `n`
    (dimension), `mu1`/`mu2` (component means), `isigma`/`dsigma`
    (shared inverse covariance and its determinant) and the mixture
    weights `w1`/`w2`.
    """
    # log N(x | mu, sigma): normalization constant, log-determinant,
    # and the Mahalanobis quadratic form.
    log_like1 = - 0.5 * n * tt.log(2 * np.pi) \
                - 0.5 * tt.log(dsigma) \
                - 0.5 * (x - mu1).T.dot(isigma).dot(x - mu1)
    log_like2 = - 0.5 * n * tt.log(2 * np.pi) \
                - 0.5 * tt.log(dsigma) \
                - 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2)
    # Weighted mixture via exp/log rather than log-sum-exp.
    # NOTE(review): could underflow for large n / small likelihoods.
    return tt.log(w1 * tt.exp(log_like1) + w2 * tt.exp(log_like2))
with pm.Model() as ATMIP_test:
X = pm.Uniform('X',
shape=n,
lower=-2. * np.ones_like(mu1),
upper=2. * np.ones_like(mu1),
testval=-1. * np.ones_like(mu1),
transform=None)
like = pm.Deterministic('like', two_gaussians(X))
llk = pm.Potential('like', like)
with ATMIP_test:
step = smc.SMC(
n_chains=n_chains, tune_interval=tune_interval,
likelihood_name=ATMIP_test.deterministics[0].name)
mtrace = smc.ATMIP_sample(
n_steps=n_steps,
step=step,
n_jobs=n_jobs,
progressbar=False,
stage='0',
homepath=test_folder,
model=ATMIP_test,
rm_flag=True)
_ = pm.traceplot(mtrace, combined=True)
shutil.rmtree(test_folder)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 以下のようなデータがある
Step2: データを見てみよう
Step3: 1-2. 集計
Step4: 1-3. データの前処理
Step5: 欠損値の補間
Step6: カラムの追加
Step7: 1-4. データの可視化
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv("train.csv")
df.head()
# 65歳の人のデータを抜き出す
df[df.Age == 65][['Name', 'Age']]
# データ件数
len(df)
# 先頭2件を確認
df.head(2)
# 後ろ5件(デフォルト5件)を確認
df.tail()
# 先頭3件の名前、年齢、性別のみ出力
df[['Name', 'Age', 'Sex']].head(3)
# 計算可能なもの(数値)だけが対象
df.describe()
max_age = df['Age'].max()
print('年齢の最大値: {0}'.format(max_age))
mean_age = df['Age'].mean()
print('年齢の平均値: {0}'.format(mean_age))
# 女性で年齢の高い上位10名を確認する
df[df.Sex=='female'][['Name', 'Sex', 'Age']].sort_values('Age', ascending=0).head(10)
# Cabin (部屋番号)などの値には多くの欠損データが含まれている
# 687件が欠損している
df['Cabin'].isnull().sum()
# Ticket(チケット番号)は、今回の分析では有用ではなさそう
df[['Name', 'Ticket']].head()
# CabinとTicketのカラムを削除する
# 列指定で削除したいので axis=1 を指定する
df = df.drop(['Ticket', 'Cabin'], axis=1)
# df を上書きしているので、2回実行すると「TicketとCabinがないよー」って言われる
df.head()
# Age, Cabin などに欠損値(NaN)があることを確認
df.loc[4:10]
# interpolate() 関数で欠損値を補間する
# とはいえ、年齢で前後の間を埋めるのはよくない
df.loc[4:6][['Name', 'Age']].interpolate()
df.loc[4:6][['Name', 'Age']]
# 年齢の欠損値を、性別毎の年齢の平均値で補間する
female_age_mean = round(df[df.Sex=='female']['Age'].mean())
male_age_mean = round(df[df.Sex=='male']['Age'].mean())
print('女性の平均年齢は{0}歳、男性は{1}歳です。この平均年齢で補間します。'.format(female_age_mean, male_age_mean))
# 女性で年齢が欠損している人の例
df[df.PassengerId==20][['PassengerId', 'Name', 'Sex', 'Age']]
# 男性で年齢が欠損している人の例
df[df.PassengerId==6][['PassengerId', 'Name', 'Sex', 'Age']]
# 年齢の欠損値を平均で埋める
dff = df[df.Sex=='female'].fillna({'Age': female_age_mean})
dfm = df[df.Sex=='male'].fillna({'Age': male_age_mean})
df2 = dff.append(dfm)
# 新しいデータフレームでは年齢の平均値が入っている
df2[df2.PassengerId==20][['PassengerId', 'Name', 'Sex', 'Age']]
df2[df2.PassengerId==6][['PassengerId', 'Name', 'Sex', 'Age']]
# 年齢で分類し、数値をふる
def classification_age(age):
    """Map an age to a coarse age-class label.

    '1' for <=19, '2' for 20-34, '3' for 35-49, '4' for >=50,
    and '0' when no comparison holds (e.g. age is NaN).
    """
    for label, upper_bound in (('1', 19), ('2', 34), ('3', 49)):
        if age <= upper_bound:
            return label
    if age >= 50:
        return '4'
    # NaN fails every comparison above, so it lands here
    return '0'
# df.Age は df['Age] と同じ
df = df2
df['AgeClass'] = df.Age.map(classification_age)
df.head()
# 0 = 死亡, 1 = 生存という2つの軸でテータを見る
df['Survived'].plot(alpha=0.6, kind='hist', bins=2)
plt.xlabel('Survived')
plt.ylabel('N')
# 男性女性の死亡/生存をグラフにする
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6, 3))
for i, sex in enumerate(['male', 'female']):
df['Survived'][df.Sex==sex].hist(alpha=0.5, bins=2, ax=axes[i])
axes[i].set_title(sex)
fig.subplots_adjust(hspace=0.3)
fig.tight_layout()
# 男性の年齢ごとの死亡/生存をグラフにする
plt.hist([df[(df.Survived==0) & (df.Sex=='male')]['Age'],
df[(df.Survived==1) & (df.Sex=='male')]['Age']],
alpha=0.6, range=(1,80), bins=10, stacked=True,
label=('Died', 'Survived'))
plt.legend()
plt.xlabel('Age')
plt.ylabel('N')
plt.title('male')
# 女性の年齢ごとの死亡/生存をグラフにする
plt.hist([df[(df.Survived==0) & (df.Sex=='female')]['Age'],
df[(df.Survived==1) & (df.Sex=='female')]['Age']],
alpha=0.6, range=(1,80), bins=10, stacked=True,
label=('Died', 'Survived'))
plt.legend()
plt.xlabel('Age')
plt.ylabel('N')
plt.title('female')
# 女性と男性のグラフをY軸を合わせて並べて描画
fig = plt.figure(figsize=[15, 5])
# http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.subplot
# 121 は nrows, ncols, plot_number
ax1 = fig.add_subplot(121)
plt.hist([df[(df.Survived==0) & (df.Sex=='female')]['Age'],
df[(df.Survived==1) & (df.Sex=='female')]['Age']],
alpha=0.6, range=(1,80), bins=10, stacked=True,
label=('Died', 'Survived'))
plt.xlabel('Age')
plt.yticks([0, 50, 100, 150, 200, 250])
plt.ylabel('N')
plt.title('female')
plt.legend()
ax2 = fig.add_subplot(122)
plt.hist([df[(df.Survived==0) & (df.Sex=='male')]['Age'],
df[(df.Survived==1) & (df.Sex=='male')]['Age']],
alpha=0.6, range=(1,80), bins=10, stacked=True,
label=('Died', 'Survived'))
plt.xlabel('Age')
plt.yticks([0, 50, 100, 150, 200, 250])
plt.ylabel('N')
plt.title('male')
plt.legend()
plt.show()
mean_age = df['Age'].mean()
for pclass in [1, 2, 3]:
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=[10, 10])
sex_n=0
for sex in ['male', 'female']:
for survived in [0, 1]:
fig = df[((df.Survived==survived) & (df.Sex==sex) & (df.Pclass==pclass) )].Age.hist(alpha=0.6, bins=10, ax=axes[sex_n][survived])
fig.set_xlabel("Age")
fig.set_ylabel('N ('+sex+str(survived)+' )')
axes[sex_n][survived].set_ylim(0,70)
fig.set_title('Pclass = {0} / mean_age = {1}'.format(pclass, round(mean_age)))
sex_n += 1
plt.subplots_adjust(hspace=0.5)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple enough! We can see that each row in this dataset is a service request record containing
Step2: Wait, but we can do even better! Based on the project requirements and what we know about
Step3: A Check takes a function as an argument with the signature x -> Bool
Step4: Multiple columns can also use the same Check objects. In the code snippet
Step5: Once we've defined the DataFrameSchema, we can use it to verify the data.
Step6: With a DataFrameSchema, not only can we see what to expect from
Step7: Or if a column isn't the expected type.
Step8: Or if the column is somehow not present in the dataframe.
Step9: Note that calling schema.validate(df) will return the validated dataframe,
Step10: Adding Guardrails around your Data Munging Pipeline
Step11: Creating Derived Data
Step12: Usage Note
Step13: Usage Note
Step14: Now we can pipe these functions in sequence to obtain our cleaned data.
Step16: Reproducible Reports
Step17: Proportion of Service Requests Closed on or Before the Due Date
Step18: Daily Complaints per Borough
|
<ASSISTANT_TASK:>
Python Code:
import logging
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from collections import OrderedDict
from IPython.display import display, Markdown
from sodapy import Socrata
logging.disable(logging.WARNING)
# utility function to print python output as markdown snippets
def print_output(s):
    """Render *s* inside a fenced python code block via IPython's Markdown display."""
    snippet = "```python\n{}\n```".format(s)
    display(Markdown(snippet))
plt.style.use('seaborn-white')
%matplotlib inline
# define date range
DATE_RANGE = ("2018/12/01", "2018/12/31")
client = Socrata("data.cityofnewyork.us", None)
# get data from the beginning of the month
df_311 = pd.DataFrame.from_records(
client.get(
"erm2-nwe9",
# use socrata SoQL query clauses: https://dev.socrata.com/docs/queries/
where="created_date >= '%s' and created_date <= '%s'" % DATE_RANGE))
df_311.head(3)
# specify column names and types
usecols = OrderedDict([
("unique_key", str),
("borough", str),
("agency_name", str),
("created_date", "datetime64[ns]"),
("due_date", "datetime64[ns]"),
("closed_date", "datetime64[ns]"),
("complaint_type", str),
])
cols = list(usecols.keys())
# page through the results
MAX_PAGES = 500
LIMIT = 10000
records = []
print("fetching 311 data:")
for i in range(MAX_PAGES):
results = client.get(
"erm2-nwe9",
select=",".join(cols),
where="created_date >= '%s' and created_date <= '%s'" % DATE_RANGE,
order="created_date",
limit=LIMIT,
offset=LIMIT * i)
print(".", end="", flush=True)
records.extend(results)
if len(results) < LIMIT:
break
df_311 = pd.DataFrame.from_records(records)[cols]
df_311 = df_311.astype(usecols)
display(df_311.head(3))
from pandera import DataFrameSchema, Check, Column, Index, Bool, \
DateTime, Float, Int, String
schema = DataFrameSchema({
"column1": Column(String),
"column2": Column(Int, Check(lambda x: x > 0)),
"column3": Column(Float, [
Check(lambda x: x > 0.),
Check(lambda s: s.mean() > 0.5, element_wise=False)])
})
from pandera import SeriesSchema
s = pd.Series([1, 1, 2, 3])
series_schema = SeriesSchema(
Int, Check(lambda s: s.duplicated().sum() == 0, element_wise=False,
error="failed uniqueness check"))
try:
series_schema.validate(s)
except Exception as e:
print_output(e)
# define a date range checker
date_range_check = Check(
lambda s: (s >= pd.Timestamp(DATE_RANGE[0])) &
(s <= pd.Timestamp(DATE_RANGE[1])),
element_wise=False)
date_min_check = Check(
lambda s: s >= pd.Timestamp(DATE_RANGE[0]),
element_wise=False)
BOROUGHS = [
"BROOKLYN",
"QUEENS",
"BRONX",
"MANHATTAN",
"STATEN ISLAND",
"Unspecified"]
# constructing a schema should feel familiar for pandas users
df_311_schema = DataFrameSchema({
# make sure unique_key is unique
"unique_key": Column(String, Check(lambda s: s.duplicated().sum() == 0,
element_wise=False,
error="column is not unique")),
# assert borough column contain proper values
"borough": Column(String, Check(lambda x: x in BOROUGHS,
error="borough check failed")),
"agency_name": Column(String),
# assert that records are within the date range
"created_date": Column(DateTime, date_range_check),
"due_date": Column(DateTime, date_min_check, nullable=True),
"closed_date": Column(DateTime, date_min_check, nullable=True),
"complaint_type": Column(String),
})
def preprocess_data(df):
    """Drop rows whose closed_date precedes created_date.

    Rows with a null closed_date are kept (NaT comparisons are False).
    """
    chronological = ~(df.closed_date < df.created_date)
    return df[chronological]
preprocessed_df_311 = df_311_schema.validate(preprocess_data(df_311))
df_311_corrupt = df_311.copy()
df_311_corrupt["created_date"].iloc[:5] = df_311_corrupt[
"created_date"].head(5) - pd.Timedelta(weeks=10)
try:
df_311_schema.validate(df_311_corrupt)
except Exception as e:
print_output(e.code)
df_311_corrupt = df_311.copy().assign(
unique_key=df_311.unique_key.astype(int))
try:
df_311_schema.validate(df_311_corrupt)
except Exception as e:
print_output(e.code)
df_311_corrupt = df_311.copy().drop("complaint_type", axis=1)
try:
df_311_schema.validate(df_311_corrupt)
except Exception as e:
print_output(e.code)
# Illustrative stubs only — `processed_df` and `schema` are placeholders,
# so these are not runnable as-is. They show where schema validation can
# sit in a pipeline step.
# Variant 1: no validation at all.
def processing_function(df):
    # do something
    ...
    return processed_df
# Variant 2: validate the output just before returning it.
def processing_function(df):
    # do something
    ...
    # validate the output
    return schema.validate(processed_df)
# Variant 3: validate the input on entry, then process.
def processing_function(df):
    # validate the input
    df = schema.validate(df)
    # do something
    ...
    return processed_df
REPLACE_DICT = {
"Noise - Residential": "Noise",
"Noise - Street/Sidewalk": "Noise",
"Noise - Commercial": "Noise",
"Noise - Park": "Noise",
"Noise - Helicopter": "Noise",
"Noise - Vehicle": "Noise",
}
clean_complaint_schema = DataFrameSchema({
"complaint_type_clean": Column(String, [
Check(lambda x: x not in REPLACE_DICT),
Check(lambda s: (s == "Noise").any(), element_wise=False)
])
})
def clean_complaint_type(df):
    """Add 'complaint_type_clean' with noise subtypes collapsed into 'Noise'.

    The result is validated against clean_complaint_schema before returning.
    """
    clean_df = df.assign(complaint_type_clean=df.complaint_type)
    clean_df = clean_df.replace({"complaint_type_clean": REPLACE_DICT})
    return clean_complaint_schema.validate(clean_df)
clean_complaint_type(df_311).head(3)
from pandera import check_output
@check_output(DataFrameSchema({"closed_lte_due": Column(Bool, nullable=True)}))
def add_closed_lte_due(df):
    """Add boolean column 'closed_lte_due': was the request closed on time?

    Rows whose due_date is null get pd.NaT (missing) rather than False:
    the `.where(...)` keeps the comparison result only where a due_date
    exists, so "no deadline" is not counted as "missed deadline".
    """
    return df.assign(
        closed_lte_due=(
            (df.closed_date <= df.due_date)
            .where(df.due_date.notnull(), pd.NaT))
    )
add_closed_lte_due(df_311).head(3)
from pandera import check_input
@check_input(DataFrameSchema({"created_date": Column(DateTime)}))
@check_output(DataFrameSchema({"created_date_clean": Column(DateTime)}))
def clean_created_date(df):
    """Add 'created_date_clean': created_date truncated to the day (midnight).

    Round-trips through a date-only string and back to datetime64. The
    original format string had a stray leading apostrophe ("'%Y-%m-%d"),
    which produced strings like "'2018-12-01" that the astype back to
    datetime64 cannot parse.
    """
    return (
        df.assign(created_date_clean=(
            df.created_date
            .dt.strftime("%Y-%m-%d")
            .astype("datetime64[ns]")))
    )
clean_created_date(df_311).head(3)
BOROUGH_POPULATION_MAP = {
"BROOKLYN": 2648771,
"QUEENS": 2358582,
"BRONX": 1471160,
"MANHATTAN": 1664727,
"STATEN ISLAND": 479458,
}
@check_output(DataFrameSchema({
    "borough_population": Column(
        Float, Check(lambda x: x > 0), nullable=True)
}))
def add_borough_population(df):
    """Attach each borough's population; boroughs missing from the map become NaN."""
    borough_pop = df.borough.map(BOROUGH_POPULATION_MAP)
    return df.assign(borough_population=borough_pop)
add_borough_population(df_311).head(3)
clean_df_311 = (
df_311
.pipe(clean_complaint_type)
.pipe(add_closed_lte_due)
.pipe(clean_created_date)
.pipe(add_borough_population))
complaint_by_borough_schema = DataFrameSchema({
"borough": Column(String),
"borough_population": Column(Float, nullable=True),
"complaint_type_clean": Column(String),
# make sure count column contains positive integers
"count": Column(Int, Check(lambda x: x > 0)),
"complaints_per_pop": Column(Float)
})
TOP_N = 12
COMPLAINT_TYPE_TITLE = \
"%s ( %s - %s )" % (
"Number of New York 311 service requests by borough and complaint type",
DATE_RANGE[0], DATE_RANGE[1])
def normalize_by_population(
    df: pd.DataFrame,
    count: str,
    population: str,
    scale: float) -> pd.Series:
    """Return df[count] normalized per `scale` units of df[population].

    E.g. scale=10**6 yields counts per one million people, scale=10**3
    per thousand. (The original docstring lost its triple quotes in
    transcription, leaving a bare-word line that is a SyntaxError; it
    also described only the 10**6 case even though `scale` is generic.)
    """
    return df[count] / (df[population] / scale)
@check_output(complaint_by_borough_schema)
def agg_complaint_types_by_borough(clean_df):
    """Count requests per (borough, complaint type), normalized per 1M residents.

    Only rows for the TOP_N most frequent complaint types (measured across
    all boroughs) are returned. Output is validated against
    complaint_by_borough_schema by the decorator.
    """
    # one row per (borough, population, complaint type) with its request count
    plot_df = (
        clean_df
        .groupby(["borough", "borough_population", "complaint_type_clean"])
        .unique_key.count()
        .rename("count")
        .reset_index()
        .assign(complaints_per_pop=lambda df: (
            normalize_by_population(df, "count", "borough_population", 10**6)))
    )
    # select only the top 12 complaint types (across all boroughs)
    top_complaints = (
        clean_df.complaint_type_clean
        .value_counts()
        .sort_values(ascending=False)
        .head(TOP_N).index.tolist())
    return plot_df[plot_df.complaint_type_clean.isin(top_complaints)]
# this is probably overkill, but this illustrates that you can
# add schema checks at the interface of two functions.
@check_input(complaint_by_borough_schema)
def plot_complaint_types_by_borough(complaint_by_borough_df):
    """Faceted bar chart: complaints per 1M residents, one panel per complaint type."""
    g = sns.catplot(
        x="complaints_per_pop",
        y="borough",
        col="complaint_type_clean",
        col_wrap=3,
        data=complaint_by_borough_df,
        kind="bar",
        height=3,
        aspect=1.4,
        sharex=False,
    )
    g.set_titles(template="{col_name}")
    g.set_ylabels("")
    g.set_xlabels("n complaints / 1M people")
    g.fig.suptitle(COMPLAINT_TYPE_TITLE, y=1.05, fontweight="bold", fontsize=18)
    plt.tight_layout()
    plt.subplots_adjust(hspace=0.6, wspace=0.4)
    sns.despine(left=True, bottom=True)
    # drop the tick marks on every facet's y axis
    for ax in g.axes.ravel():
        ax.tick_params(left=False)
    return g
with sns.plotting_context(context="notebook", font_scale=1.2):
g = agg_complaint_types_by_borough(clean_df_311).pipe(
plot_complaint_types_by_borough)
proportion_by_agency_schema = DataFrameSchema({
"agency_name": Column(String),
"proportion_closed_on_time": Column(
Float, Check(lambda x: 0 <= x <= 1), nullable=True)
})
PROPORTION_BY_AGENCY_TITLE = \
"%s ( %s - %s )" % (
"Proportion of New York 311 requests closed on time by Responding Agency",
DATE_RANGE[0], DATE_RANGE[1])
@check_output(proportion_by_agency_schema)
def agg_proportion_by_agency(clean_df):
    """Per agency: fraction of requests closed on or before their due date.

    Agencies with no evaluable requests (count 0 -> NaN) are dropped, as
    are agencies whose on-time proportion is exactly 0.
    """
    return (
        clean_df.groupby("agency_name")
        # mean of a boolean series = proportion True; NaN when nothing to count
        .closed_lte_due.apply(lambda s: s.mean() if s.count() > 0 else np.nan)
        .dropna()
        .rename("proportion_closed_on_time")
        .reset_index("agency_name")
        .query("proportion_closed_on_time > 0")
    )
@check_input(proportion_by_agency_schema)
def plot_proportion_by_agency(proportion_by_agency_df):
    """Horizontal bar chart of on-time closure rate per agency, sorted best-to-worst."""
    g = sns.catplot(
        x="proportion_closed_on_time", y="agency_name",
        # sort bars by on-time rate, highest first
        order=proportion_by_agency_df.sort_values(
            "proportion_closed_on_time", ascending=False).agency_name,
        data=proportion_by_agency_df,
        kind="bar",
        height=8,
        aspect=1.4)
    sns.despine(left=True, bottom=True)
    g.set_ylabels("")
    g.set_xlabels("proportion closed on time")
    for ax in g.axes.ravel():
        ax.tick_params(left=False)
    g.fig.suptitle(PROPORTION_BY_AGENCY_TITLE, y=1.03, fontweight="bold", fontsize=14)
    return g
with sns.plotting_context(context="notebook", font_scale=1.1):
axes = plot_proportion_by_agency(agg_proportion_by_agency(clean_df_311))
daily_complaints_schema = DataFrameSchema({
"created_date_clean": Column(DateTime, Check(lambda x: x >= pd.Timestamp(DATE_RANGE[0]))),
"borough": Column(String, Check(lambda x: x in BOROUGHS)),
"borough_population": Column(Float, nullable=True),
"count": Column(Int, Check(lambda x: x > 0)),
"complaints_per_pop": Column(Float, nullable=True)
})
DAILY_COMPLAINTS_TITLE = \
"%s ( %s - %s )" % (
"Number of daily New York 311 requests by borough",
DATE_RANGE[0], DATE_RANGE[1])
@check_output(daily_complaints_schema)
def agg_daily_complaints(clean_df):
    """Daily request counts per borough, normalized per 1K residents."""
    return (
        clean_df
        .groupby(["borough", "borough_population", "created_date_clean"])
        .unique_key.count().rename("count")
        .reset_index()
        .assign(complaints_per_pop=lambda df: (
            normalize_by_population(df, "count", "borough_population", 10**3))))
@check_input(daily_complaints_schema)
def plot_daily_complaints(daily_complaints_df):
    """Line plot of daily complaints per 1K residents, one line per borough."""
    fig, ax = plt.subplots(1, figsize=(12, 6))
    ax = sns.lineplot(
        x="created_date_clean", y="complaints_per_pop", hue="borough",
        data=daily_complaints_df, ax=ax)
    sns.despine()
    ax.set_ylabel("n complaints / 1K people")
    ax.set_xlabel("created on")
    fig.suptitle(DAILY_COMPLAINTS_TITLE, y=0.99, fontweight="bold", fontsize=16)
    return ax
return ax
with sns.plotting_context(context="notebook", font_scale=1.2):
plot_daily_complaints(agg_daily_complaints(clean_df_311))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: iii. Explain what private and public do
Step2: iv. Explain what size_t is used for
Step3: v. Explain why this code avoids the use of C pointers
Step4: ix. Explain what a list initializer does
Step5: x. Explain what the "Rule of Zero" is, and how it relates to the "Rule of Three"
|
<ASSISTANT_TASK:>
Python Code:
//***Do like your comments in stack.h so copied here for future review and study:***
// In C++ a class is just a fancy struct
// Both struct and class have two internal namespaces:
// private: only accessible by the struct/class itself
// public: accessible by other code that is using the struct/class
//
// When both private and public are explicitly declared, there is no
// difference between a class and a struct
//
// However, when neither private nor public are declared, then:
// struct: defaults to public
// class: defaults to private
//
// By convention (Google), use structs for static data, and use
// classes for everything else
//
// Notably, classes can include internal function definitions (like in Python)
// Internal functions of a class are called "methods"
//
// A class also always has a pointer to itself available, named "this"
// The keyword "this" serves a similar purpose to "self" in Python
// It allows you to access a specific "instance" of the class so that
// you can manipulate it within the definitions of your methods
//
// Recall that C++ automatically typedefs structs/classes
//*Quote from comments:*
// This structure type is private to the class, and used as a form of
// linked list in order to contain the actual (static) data stored by the Stack class
//*Quote from comments:*
// Size method
// Specifying const tells the compiler that the method will not change the
// internal state of the instance of the class
//*Quote from comments:*
// However, by using the "unique_ptr" type above, we carefully avoid any
// explicit memory allocation by using the allocation pre-defined inside the
// unique_ptr itself. By using memory-safe structures in this way, we are using
// the "Rule of Zero" and simplifying our life by defining ZERO of them:
// https://rmf.io/cxx11/rule-of-zero/
// http://www.cplusplus.com/reference/memory/unique_ptr/
//*Quote from comments*
// Implementation of default constructor
// Member-initializer list runs before the (empty) constructor body; the
// trailing ';' after '{}' is redundant but harmless.
Stack::Stack()
: depth(0) // internal depth is 0
, head(nullptr) // internal linked list is null to start
{};
// The construction ": var1(val1), var2(val2) {}" is called a
// "list initializer" for a constructor, and is the preferred
// way of setting default field values for a class instance
// Here 0 is the default value for Stack::depth
// and nullptr is the default value for Stack::head
//*Quote from comments:*
// Normally we would have to implement the following things in C++ here:
// 1) Class Destructor : to deallocate memory when a Stack is deleted
// ~Stack();
//
// 2) Copy Constructor : to define what Stack b(a) does when a is a Stack
// This should create a copy b of the Stack a, but
// should be defined appropriately to do that
// Stack(const Stack&);
//
// 3) Copy Assignment : to define what b = a does when a is a Stack
// This should create a shallow copy of the outer
// structure of a, but leave the inner structure as
// pointers to the memory contained in a, and should
// be defined appropriately to do that
// Stack& operator=(const Stack&);
//
// The need for defining ALL THREE of these things when managing memory for a
// class explicitly is known as the "Rule of Three", and is standard
// http://stackoverflow.com/questions/4172722/what-is-the-rule-of-three
//
// However, by using the "unique_ptr" type above, we carefully avoid any
// explicit memory allocation by using the allocation pre-defined inside the
// unique_ptr itself. By using memory-safe structures in this way, we are using
// the "Rule of Zero" and simplifying our life by defining ZERO of them:
// https://rmf.io/cxx11/rule-of-zero/
// http://www.cplusplus.com/reference/memory/unique_ptr/
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A. erry
Step2: A. iic
Step3: A. tony
Step4: Writing a To File
Step5: Watch Me Code 1
Step6: A. 1
Step7: A. 1
Step8: End-To-End Example (Pre-Recorded)
|
<ASSISTANT_TASK:>
Python Code:
x = input()
if x.find("rr")!= -1:
y = x[1:]
else:
y = x[:-1]
print(y)
x = input()
y = x.split()
w = ""
for z in y:
w = w + z[1]
print(w)
x = input()
x = x + x
x = x.replace("o","i")
x = x[:5]
print(x)
# all at once
with open(filename, 'r') as handle:
contents = handle.read()
# a line at a time
with open(filename, 'r') as handle:
for line in handle.readlines():
do_something_with_line
# write mode
with open(filename, 'w') as handle:
handle.write(something)
# append mode
with open(filename, 'a') as handle:
handle.write(something)
a = "savename.txt"
with open(a,'w') as b:
c = input("Enter your name: ")
b.write(c)
with open("sample.txt","r") as f:
for line in f.readlines():
print(line)
g = "done"
try:
file = 'data.txt'
with open(file,'r') as f:
print( f.read() )
except FileNotFoundError:
print(f"{file} was not found!")
# BUG in the original: the `with` statement never bound the file handle
# (no `as ...`), so `file` was still the filename string and
# `file.write("Hello")` raised AttributeError. Bind the handle and write
# through it; renamed `file` -> `path` to avoid shadowing intent.
path = "a.txt"
with open(path, 'w') as handle:
    handle.write("Hello")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step7: 1. Connect DB and Make QUERY
Step8: 2. Make Pandas DataFrame Each Position Player
Step9: 3. Set Position Category and Concat Each Datafream
Step10: 4. Make Training and Test Data
Step11: 5. Make Decision Tree Classifier Model
Step12: 6. Check Confusion Matrix
Step13: 7. Check Classification Report
Step15: 8. Recommend Position
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_formats = {'png', 'retina'}
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import MySQLdb
from sklearn.tree import export_graphviz
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
db = MySQLdb.connect(
"db.fastcamp.us",
"root",
"dkstncks",
"football",
charset='utf8',
)
def make_query(position):
    """Build a SQL query selecting players of a single position.

    (The docstring and SQL string literals lost their triple quotes in
    transcription, which made this a SyntaxError; they are restored here.)

    Parameters
    ----------
    position : str
        One of "F" (forward), "M" (midfielder), "D" (defender),
        "G" (goalkeeper). Any other value returns the bare SELECT.

    Returns
    -------
    str
        The SQL query string.
    """
    SQL_QUERY = """
        SELECT *
        FROM player
    """
    if position == "F":
        SQL_QUERY += """
        WHERE position not like "%,%" and position like "%FW%" and mins > 270
        """
    if position == "M":
        SQL_QUERY += """
        WHERE position not like "%,%" and position like "%M%" and mins > 270
        """
    if position == "D":
        # note the leading space in " DMC" — excludes defensive midfielders
        SQL_QUERY += """
        WHERE position not like "%,%" and position like "%D%" and position not like " DMC" and mins > 270
        """
    if position == "G":
        SQL_QUERY += """
        WHERE position not like "%,%" and position like "%G%" and mins > 270
        """
    return SQL_QUERY
# forword
SQL_QUERY = make_query("F")
forword_df = pd.read_sql(SQL_QUERY, db)
# midfilder
SQL_QUERY = make_query("M")
midfilder_df = pd.read_sql(SQL_QUERY, db)
# defencer
SQL_QUERY = make_query("D")
defencer_df = pd.read_sql(SQL_QUERY, db)
# goalkeeper
SQL_QUERY = make_query("G")
goalkeeper_df = pd.read_sql(SQL_QUERY, db)
len(forword_df), len(midfilder_df), len(defencer_df), len(goalkeeper_df)
forword_df["position"] = 0
forword_df
midfilder_df["position"] = 1
midfilder_df
defencer_df["position"] = 2
defencer_df
goalkeeper_df["position"] = 3
goalkeeper_df
concated_df = pd.concat([forword_df, midfilder_df, defencer_df, goalkeeper_df])
concated_df.tail()
X_train, X_test, y_train, y_test = train_test_split(concated_df.ix[:,:-1], concated_df.ix[:,-1], test_size=0.2, random_state=1)
from sklearn.tree import DecisionTreeClassifier
model_entropy = DecisionTreeClassifier(criterion='entropy', max_depth=3).fit(X_train, y_train)
model_gini = DecisionTreeClassifier(criterion='gini', max_depth=3).fit(X_train, y_train)
from sklearn.naive_bayes import GaussianNB
model_gaussian = GaussianNB().fit(X_train, y_train)
from sklearn.ensemble import VotingClassifier
clf1 = DecisionTreeClassifier(criterion='entropy', max_depth=3)
clf2 = DecisionTreeClassifier(criterion='gini', max_depth=3)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[('entropy', clf1), ('gini', clf2), ('naive', clf3)], voting='soft', weights=[2, 1, 1])
model_ensemble = eclf.fit(X_train, y_train)
cm_entropy = confusion_matrix(y_test, model_entropy.predict(X_test))
cm_gini = confusion_matrix(y_test, model_gini.predict(X_test))
cm_gaussian = confusion_matrix(y_test, model_gaussian.predict(X_test))
cm_ensemble = confusion_matrix(y_test, model_ensemble.predict(X_test))
print("entropy"+"="*12)
print(cm_entropy)
print("gini"+"="*15)
print(cm_gini)
print("gaussian"+"="*11)
print(cm_gaussian)
print("ensemble"+"="*11)
print(cm_ensemble)
print("entropy"+"="*50)
print(classification_report(y_test, model_entropy.predict(X_test)))
print("gini"+"="*50)
print(classification_report(y_test, model_gini.predict(X_test)))
print("gaussian"+"="*50)
print(classification_report(y_test, model_gaussian.predict(X_test)))
print("ensemble"+"="*50)
print(classification_report(y_test, model_ensemble.predict(X_test)))
# Players listed with multiple positions (comma in `position`) and more than
# 270 minutes played. (The triple quotes around this SQL literal were lost in
# transcription, leaving a SyntaxError; restored here.)
SQL_QUERY = """
SELECT
    tall, weight, apps_sub, mins, goals, assists
    , spg, ps_x, motm, aw, tackles, inter, fouls, clear, drb
    , owng, keyp_x, fouled, off, disp, unstch, avgp, position
FROM player
WHERE position like "%,%" and mins > 270
;
"""
many_position_player_df = pd.read_sql(SQL_QUERY, db)
len(many_position_player_df)
predict_data = model_ensemble.predict(many_position_player_df.ix[:,:-1])
many_position_player_df["recomend_position"] = predict_data
# Recomend Result
# 0 : Forword, 1 : Midfilder, 2 : Defencer, 3 : Goalkeeper
many_position_player_df.ix[:10,-2:]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Rename the columns. They're now streamlined with the taxi input.
Step2: Parse the dates.
Step3: Have a look at every station. How many pickups are there per station?
Step4: When you take a look at the map, you won't be surprised that the Grand Central Station is the one with the most frequent usage.
Step5: So we have 326 different start stations.
Step6: We have 326 different end stations, too. But are all end_stations contained in start_stations and vice versa?
Step7: So this is the proof, that all stations are used as start and end stations.
Step8: So there is not that much data missing. That's quite surprising, maybe it's wrong.
Step9: So we only have many zeros in the gender-feature. Gender can have 3 values (0=unknown, 1=male, 2=female)
Step10: So we see, we have also missing values in "gender" and "birth year". But we are not interested in this features, therefore we can ignore this missing values at this point.
Step11: We are interested in the trip time in minutes.
Step12: We have lots of outliers.
Step13: Identify the the cases without geo data and remove them from our data to be processed.
Step14: So how many percent of data are left to be processed?
Step15: <font color = 'blue'> So we dropped nothing because of missing geo tags. </font color>
Step16: We sometimes have some unreasonably small trip_times.
Step17: Be aware, this operation is quite slow.
Step18: We cannot use miles because vincenty ceils / floors the results
Step19: Check if new column is present
Step20: Parse to km/h
Step21: Convert avg_velocity from km/h to miles/h. (1 km = 0.621371 miles)
Step22: Be aware
Step23: We dropped about 42% of the data!
Step24: A little drawing
Step25: Where is a pickup maximum?
Step26: The area with the maximum pickups is around the Grand Central Terminal. Not very surprising.
Step27: So we know that about 97% of the bins have 0 pickups in it. This was expected, because we have static stations only.
Step28: We lost no trips through the bounding box.
Step29: Let's take a first look at the distribution of the cleaned target variable which we want to estimate
Step30: Make a new dataframe with features and targets
Step31: Use minutes for prediction instead of seconds (ceil the time). Definitley more robust than seconds!
Step32: So we hace 10 different velocities to predict.
Step33: Changed test size to 5%
Step34: If you want to export something...
Step35: Just to be sure
Step36: So this is a bad result. Let's try random forest now.
Step37: Watch at the best results.
Step38: Train the best random forest again seperately
Step39: So this plot is not particularly useful in this case. But it's a hinto to how to visualize the results.
Step40: Dump the best random forest...
|
<ASSISTANT_TASK:>
Python Code:
data = pd.read_csv('data/bike_oneweekfrom20140505.csv', index_col=0, parse_dates=True)
data.info()
data.columns
new_column_names = ['trip_time', 'pickup_datetime', 'dropoff_datetime', 'start_station_id',
'start_station_name', 'pickup_latitude',
'pickup_longitude', 'end_station_id', 'end_station_name',
'dropoff_latitude', 'dropoff_longitude', 'bikeid', 'usertype',
'birth year', 'gender']
data.columns = new_column_names
data.describe()
data['pickup_datetime'] =pd.to_datetime(data['pickup_datetime'], format = '%Y-%m-%d %H:%M:%S')
data['dropoff_datetime'] =pd.to_datetime(data['dropoff_datetime'], format = '%Y-%m-%d %H:%M:%S')
data['trip_time'] = pd.to_timedelta(data['trip_time'], 's')
data.describe().transpose()
data.head()
data.info()
group_by_start = data.groupby(data.start_station_id, )
usage_freq = group_by_start.trip_time.count()
print('Amount of stations: ' + str(len(usage_freq)))
plt.hist(usage_freq, bins = 50)
plt.title('Frequency of starts from start stations')
group_by_start_name = data.groupby(data.start_station_name, )
usage_freq_name = group_by_start_name.trip_time.count()
usage_freq_name.sort_values().keys()[-10:]
usage_freq_name.sort_values().keys()
group_by_end_name = data.groupby(data.end_station_name, )
usage_freq_name = group_by_end_name.trip_time.count()
usage_freq_name.sort_values().keys()
group_by_start_id = data.groupby(data.start_station_id, )
start_ids = group_by_start_id.trip_time.count().sort_values().keys()
group_by_end_id = data.groupby(data.end_station_id, )
end_ids = group_by_end_id.trip_time.count().sort_values().keys()
len(set(start_ids).intersection(end_ids))
data.isnull().sum()
(data==0).sum()
from collections import Counter
print(Counter(data.gender))
Counter(data['birth year'])
plt.hist((data['trip_time'] / np.timedelta64(1, 'm')), bins=30, range=[0, 100])
plt.title('Distribution of trip_time in minutes')
plt.xlabel('trip_time')
plt.ylabel('frequency')
plt.savefig('figures/bike_trip_time.png', format='png', dpi=300)
sns.boxplot((data['trip_time'] / np.timedelta64(1, 'm')))
data.trip_time.quantile([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99])
len(data.trip_time.value_counts().values)
anomaly = data.loc[(data['dropoff_longitude'].isnull()) | (data['dropoff_latitude'].isnull()) |
(data['pickup_longitude'].isnull()) | (data['pickup_latitude'].isnull())]
data = data.drop(anomaly.index)
anomaly['flag'] = 'geo_NA'
data.isnull().sum()
len(data)/(len(data)+len(anomaly))
anomaly.tail()
plt.hist(data.trip_time.values / np.timedelta64(1, 'm'), bins=50, range=[0,100])
data['trip_dist'] = -1 # Init trip_dist
# inpout for vincenty:(location.latitude, location.longitude)
from geopy.distance import vincenty
# Compute the geodesic pickup->dropoff distance (meters) for every trip.
# BUG in the original: range(0, len(data)-1) stopped one row early, so the
# last row's trip_dist silently kept its -1 sentinel; range(len(data))
# covers all rows. NOTE(review): set_value addresses by *label*, which only
# matches .iloc positions if the index is a clean RangeIndex — verify after
# the earlier row drops. Row-by-row vincenty is also very slow; a vectorized
# haversine would be much faster.
for i in range(len(data)):
    pickup = (data.iloc[i]['pickup_latitude'], data.iloc[i]['pickup_longitude'])
    dropoff = (data.iloc[i]['dropoff_latitude'], data.iloc[i]['dropoff_longitude'])
    data.set_value(i, 'trip_dist', vincenty(pickup,dropoff).meters)
data.trip_dist
data.columns
# data.to_csv('data/bike_20140505_with_dist.csv')
data['avg_velocity'] = data.trip_dist.values/(data.trip_time / (np.timedelta64(1, 'h')))
data.avg_velocity
data.avg_velocity = data.avg_velocity/1000
data.avg_velocity
data['avg_velocity'] = data.avg_velocity*0.627371
# data.to_csv('data/bike_20140505_with_dist_and_avg_velo.csv')
plt.hist(data.avg_velocity, bins=60, range=[0,15])
plt.title('Distribution of avg_velocity in mph')
plt.xlabel('avg_velocity')
plt.ylabel('frequency')
plt.savefig('figures/bike_avg_vel.png', format='png', dpi=300)
data.avg_velocity.describe() # in mph
data.head()
data.avg_velocity.quantile([.0001,.01, .5, .75, .95, .975, .99, .995])
lb = 5
ub = 15
anomaly = data.loc[(data['avg_velocity'] < lb) | (data['avg_velocity'] > ub)]
# be careful! Maybe adjust to append.
#anomaly.loc[
anomaly.loc[data.loc[(data['avg_velocity'] < lb)].index,'flag'] = 'too_slow'
anomaly.loc[data.loc[(data['avg_velocity'] > ub)].index,'flag'] = 'too_fast'
data = data.drop(anomaly.index, errors='ignore') # ignore uncontained labels / indices
print(1-len(data)/(len(data)+len(anomaly)))
anomaly.head()
data.avg_velocity.describe()
anomaly.tail()
x = data.pickup_longitude
y = data.pickup_latitude
bins = 100;
H, xedges, yedges = np.histogram2d(x, y, bins=bins)
plt.jet()
fig = plt.figure(figsize=(20, 10))
#ax.set_title('pcolormesh: exact bin edges')
#mesh = plt.pcolormesh(X, Y, H)
plt.hist2d(x,y,bins=bins)
plt.colorbar()
plt.scatter(xedges[79], yedges[61], marker='x')
ax = fig.gca()
ax.grid(False)
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.title('Pickup Frequency')
#ax.set_aspect('equal')
#plt.savefig('figure.pdf', format='pdf')
#plt.savefig('figure.png', format='png')
plt.savefig('figures/bike_stations.png', format='png', dpi=300)
np.amax(H)
np.where(H==2001)
yedges[79],xedges[61],
type(H)
print(H.shape)
print(H.size)
print(H.max())
print('Current bin width:')
print(vincenty((xedges[0], yedges[0]), (xedges[1], yedges[1])).meters)
distances = (-1)*np.ones(len(xedges)-1)
for x in range(0,len(xedges)-1, 1):
distances[x] = vincenty((xedges[x], yedges[x]), (xedges[x+1], yedges[x+1])).meters
print('Sizes of all bins in meters:')
distances
(H==0).sum()/H.size
jfk_geodata = (40.641547, -73.778118)
ridgefield_geodata = (40.856406, -74.020642)
data_in_box = data.loc[(data['dropoff_latitude'] > jfk_geodata[0]) &
(data['dropoff_longitude'] < jfk_geodata[1]) &
(data['dropoff_latitude'] < ridgefield_geodata[0]) &
(data['dropoff_longitude'] > ridgefield_geodata[1]) &
(data['pickup_latitude'] > jfk_geodata[0]) &
(data['pickup_longitude'] < jfk_geodata[1]) &
(data['pickup_latitude'] < ridgefield_geodata[0]) &
(data['pickup_longitude'] > ridgefield_geodata[1])
]
# taxidata = taxidata.drop(anomaly.index)
len(data_in_box)/len(data)
data_in_box.head()
h = data_in_box.avg_velocity.values
plt.figure(figsize=(20,10))
plt.hist(h, normed=False, bins=150)
#, histtype='stepfilled')
#plt.yscale('log')
#plt.ylabel('log(freq x)', fontsize=40)
#plt.xlabel('x = avg_amount_per_minute', fontsize=40)
#print('Min:' + str(min(h)) + '\nMax:' + str(max(h)))
#plt.locator_params(axis = 'x', nbins = 20)
plt.show()
data_in_box.head()
time_regression_df = pd.DataFrame([ #data_in_box['pickup_datetime'].dt.day, # leave this out
data_in_box['pickup_datetime'].dt.dayofweek,
data_in_box['pickup_datetime'].dt.hour,
data_in_box['pickup_latitude'],
data_in_box['pickup_longitude'],
data_in_box['dropoff_latitude'],
data_in_box['dropoff_longitude'],
np.ceil(data_in_box['avg_velocity'])
]).T
time_regression_df.columns = [#'pickup_datetime_day',
'pickup_datetime_dayofweek', 'pickup_datetime_hour',
'pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude',
'avg_velocity_mph']
time_regression_df.head()
time_regression_df.tail()
time_regression_df.ix[:,0:7].describe()
print(time_regression_df.avg_velocity_mph.value_counts())
print(len(time_regression_df.avg_velocity_mph.value_counts()))
from sklearn import cross_validation as cv
y = time_regression_df['avg_velocity_mph']
X = time_regression_df.ix[:,0:6]
X_train, X_test, y_train, y_test = cv.train_test_split(X, y,test_size=0.05,random_state=0)
from sklearn import cross_validation as cv
time_regression_df_train, time_regression_df_test = cv.train_test_split(time_regression_df, test_size=0.05, random_state=99)
y_train = time_regression_df_train['avg_velocity_mph']
x_train = time_regression_df_train.ix[:, 0:6]
y_test = time_regression_df_test['avg_velocity_mph']
x_test = time_regression_df_test.ix[:, 0:6]
x_test.head()
y_test.head()
xy_test = pd.concat([x_test, y_test], axis=1)
xy_test.head()
#Xy_test.to_csv('taxi_tree_test_Xy_20130506-12.csv')
#X_test.to_csv('taxi_tree_test_X_20130506-12.csv')
#y_test.to_csv('taxi_tree_test_y_20130506-12.csv')
# Xy_test.to_csv('bike_tree_test_Xy_20140505-11.csv')
# X_test.to_csv('bike_tree_test_X_20140505-11.csv')
# y_test.to_csv('bike_tree_test_y_20140506-11.csv')
# Xy_test_sample = Xy_test.sample(10000, random_state=99)
# Xy_test_sample.to_csv('taxi_tree_test_Xy_sample.csv')
# Xy_test_sample.head()
print(x_train.shape)
print(x_train.size)
print(x_test.shape)
print(X.shape)
print(x_train.shape[0]+x_test.shape[0])
import time
# Import the necessary modules and libraries
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import matplotlib.pyplot as plt
regtree = DecisionTreeRegressor(min_samples_split=100, random_state=99, max_depth=20)# formerly 15. 15 is reasonable
# random states: 99
regtree.fit(x_train, y_train)
regtree.score(x_test, y_test)
y_pred = regtree.predict(x_test)
diff_regtree = (y_pred-y_test)
# plt.figure(figsize=(12,10)) # not needed. set values globally
plt.hist(diff_regtree.values, bins=100, range=[-6,6])
print('Perzentile(%): ', [1,5,10,15,25,50,75,90,95,99], '\n', np.percentile(diff_regtree.values, [1,5,10,15,25,50,75,90,95,99]))
print('Absolute time deviation (in 1k): ', sum(abs(diff_regtree))/1000)
plt.title('Simple Decision Tree Regressor')
plt.xlabel('deviation in mph')
plt.ylabel('frequency')
plt.savefig('figures/bike_tree.png', format='png', dpi=300)
diff_regtree.describe()
from sklearn.ensemble import RandomForestRegressor
n_est_list = list(range(2,32,8))
min_sam_leaf_list = [2,50,100,150]
max_depth_list = list(range(2,32,8))
results = np.empty([0,4])
for n_est in n_est_list:
for min_sam_leaf in min_sam_leaf_list:
for max_depth in max_depth_list:
rd_regtree = RandomForestRegressor(n_estimators=n_est,n_jobs=6,min_samples_leaf=min_sam_leaf, random_state=99, max_depth=max_depth)
rd_regtree.fit(x_train, y_train)
score = rd_regtree.score(x_test, y_test)
results = np.vstack((results, [n_est, min_sam_leaf, max_depth,score]))
best = np.where(results == max(results[:,3]))[0]
results[best,:]
results[np.argsort(results[:, 3])][-10:,:]
results = np.vstack((results, [n_est, min_sam_leaf, max_depth,score]))
#results = np.vstack((results, [n_est, min_sam_leaf, max_depth,score]))
#results = np.vstack((results, [n_est, min_sam_leaf, max_depth,score]))
results[:,3]
from sklearn.ensemble import RandomForestRegressor
#rd_regtree = RandomForestRegressor(n_estimators=10,n_jobs=6,min_samples_leaf=4, random_state=99, max_depth=20)
rd_regtree = RandomForestRegressor(n_estimators=26,n_jobs=6,min_samples_leaf=2, random_state=99, max_depth=26)
#total sum of diff: 1132
#rd_regtree = RandomForestRegressor(n_estimators=40,n_jobs=-1,min_samples_split=3, random_state=99, max_depth=11)
#total sum of diff: 1129
rd_regtree.fit(x_train, y_train)
print('R²: ', rd_regtree.score(x_test, y_test))
rd_regtree.feature_importances_
tree_list = rd_regtree.estimators_
for i in range(0,len(tree_list)):
print((tree_list[i].feature_importances_))
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(results[:,0],results[:,2],results[:,3], marker='o')
plt.show()
y_pred = rd_regtree.predict(x_test)
np.linalg.norm(np.ceil(y_pred)-y_test)
diff_rd = (y_pred-y_test)
# plt.figure(figsize=(12,10)) # not needed. set values globally
plt.hist(diff_rd.values, bins=100, range=[-6,6])
print('Perzentile(%): ', [1,5,10,15,25,50,75,90,95,99], '\n', np.percentile(diff_rd.values, [1,5,10,15,25,50,75,90,95,99]))
print('Absolute time deviation (in 1k): ', sum(abs(diff_rd))/1000)
plt.title('Random Forest Regressor')
plt.xlabel('deviation in mph')
plt.ylabel('frequency')
plt.savefig('figures/bike_randomforest.png', format='png', dpi=150)
diff_rd.describe()
from sklearn.externals import joblib
joblib.dump(rd_regtree, 'randforlib/bike_regtree_26x_depth_26_mss_2_PY27.pkl', protocol=2)
! cd randforlib && zip bike_regtree_26x_depth_26_mss_2_PY27.zip bike_regtree_26x_depth_26_mss_2_PY27.pkl*
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We want to "tokenize" the text and discard "stopwords" like 'a', 'the', 'in'. These words aren't relevant for our analysis.
Step2: We want to tokenize words. We will use \w+ regular expression to tokenize all the words.
Step3: That was the easy part. We want all the data (text) to be "normalized". The word 'Linear' is different from the word 'linear', but for our purposes it shouldn't be counted twice.
Step4: Now we must...clean the data yet more. It's like when you think you've cleaned your room but your mom tells you it ain't that clean yet.
Step5: Now we have a Python list of stop words and a Python list of words in our text. We want to cross reference the tokens with the stop words and save those in a new list. Lets do that....
Step6: Now comes the real fun stuff. Lets plot the word frequency histogram with two lines of actual code.
|
<ASSISTANT_TASK:>
Python Code:
# Lets see how many lines are in the PDF
# We can use the '!' special character to run Linux commands inside of our notebook
!wc -l test.txt
# Now lets see how many words
!wc -w test.txt
import nltk
from nltk import tokenize
# Lets open the file so we can access the ascii contents
# fd stands for file descriptor but we can use whatever name we want
# the open command returns a file descriptor object, which itself isn't very useful
# so we need to read the entire contents so we have a text string we can parse
# advanced: use a context manager: with open('test.txt') as fd:
fd = open('test.txt', 'r')
text = fd.read()
text
# import the regular expression module
import re
# NOTE(review): prefer a raw string, r'\w+', for regex patterns
match_words = '\w+'
tokens = re.findall(match_words, text)
tokens[0:9]  # peek at the first few tokens
# We can also use nltk to accomplish the same thing
# from nltk.tokenize import RegexpTokenizer
# tokenizer = RegexpTokenizer('\w+')
# tokenizer.tokenize(text)
# Normalize: lower-case every token so 'Linear' and 'linear' count as one word
words = []
for word in tokens:
    words.append(word.lower())
words[0:8]
#Here we want a list of common stopwords but we need to download them first.
nltk.download('stopwords')
stop_words = nltk.corpus.stopwords.words('english')
stop_words
# Keep only the tokens that are not stop words
words_nsw = []
for w in words:
    if w not in stop_words:
        words_nsw.append(w)
words_nsw[0:11]
# lets import a graphing and data visualization library
import matplotlib.pyplot as plt
# Lets tell jupyter notebook to display images inside our notebook
# %matplotlib inline
# Count word frequencies and plot the 30 most common words
freq_dist = nltk.FreqDist(words_nsw)
freq_dist.plot(30)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Discussion
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from matplotlib import pyplot
# Iterative for 1-D linear convection
def lin_convection_1d(nx=41, nt=100, dt=0.01, c=1, u_init=(2, 2, 2, 2), init_offset=5):
    '''Animate 1-D linear convection solved with forward-time/backward-space.

    nx          -- number of spatial grid points on [0, 2]
    nt          -- number of time steps (animation frames)
    dt          -- time-step size
    c           -- constant convection (wave) speed
    u_init      -- initial velocity values placed on a background of ones
                   (immutable tuple default avoids the mutable-default pitfall)
    init_offset -- index at which the initial perturbation starts
    '''
    dx = 2./(nx-1)           # Space between points
    x = np.linspace(0,2,nx)  # vector storing the x coordinates
    u = np.ones(nx)          # vector storing the velocity values
    u[init_offset:(init_offset+len(u_init))] = u_init[:]

    for n in range(nt):
        un = u.copy()
        # backward difference in space, forward in time
        # NOTE(review): range(1, nx-1) leaves the last grid point frozen --
        # confirm whether the right boundary is meant to stay fixed
        for i in range(1, nx-1):
            u[i] = un[i] - c * dt / dx * (un[i] - un[i-1])
        pyplot.plot(x, u)
        pyplot.axis([0, 2, .5, 2.5])
        pyplot.pause(0.05)
        # Freeze at the last frame
        if n != nt-1:
            pyplot.cla() # Clear the axis - wait for next plot
    pyplot.show()
# Cannot keep the rectangle shape due to numerical diffusion
# (the first-order upwind scheme smears sharp edges)
lin_convection_1d(u_init = 2*np.ones(10), init_offset=10)
# Higher spatial resolution keeps the shape, but the simulation is slower and
# the scheme becomes unstable for large convection speeds
lin_convection_1d(nx=1001, dt=0.0001, nt=151, c=10,
                     u_init=2*np.ones(200), init_offset=100)
def nonlin_convection_1d(nx=41, nt=50, dt=.01, u_init=(2, 2, 2, 2),
                         init_offset=5, keep_all=False):
    '''Animate 1-D non-linear convection: the local velocity u is itself
    the convection speed, so the wave deforms as it travels.

    nx          -- number of spatial grid points on [0, 2]
    nt          -- number of time steps (animation frames)
    dt          -- time-step size
    u_init      -- initial velocity values placed on a background of ones
                   (immutable tuple default avoids the mutable-default pitfall)
    init_offset -- index at which the initial perturbation starts
    keep_all    -- if True, keep every frame on the axes instead of clearing
    '''
    dx = 2./(nx-1)
    x = np.linspace(0,2,nx)
    u = np.ones(nx)
    u[init_offset:(init_offset+len(u_init))] = u_init[:]
    for n in range(nt):
        un = u.copy()
        # NOTE(review): range(1, nx-1) leaves the last grid point frozen
        for i in range(1,nx-1):
            u[i] = un[i]-un[i]*dt/dx*(un[i]-un[i-1])
        pyplot.plot(x, u)
        pyplot.axis([0, 2, .5, 2.5])
        pyplot.pause(0.05)
        if not keep_all:
            if n != nt-1:
                pyplot.cla()
    pyplot.show()
# Switch to static inline rendering for the remaining cells (IPython magic)
%matplotlib inline
# Overlay every frame to compare successive time steps
nonlin_convection_1d(keep_all=True)
nonlin_convection_1d(nx=101, nt=101, dt=0.005,
                     u_init=2*np.ones(20), init_offset=10, keep_all=True)
# Default animated run (frames cleared between steps)
nonlin_convection_1d()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def rectanglearea(r, h=None):
    '''Return the area of a rectangle with sides ``r`` and ``h``.

    For backward compatibility ``h`` defaults to ``r``, so the original
    single-argument call still computes the area of a square.  A negative
    side length returns the legacy error sentinel ``-1``.
    '''
    if h is None:
        h = r  # original behavior: square of side r
    if r < 0 or h < 0:
        return -1  # legacy error code; callers may rely on it
    return r * h


if __name__ == "__main__":  # fixed: the guard compared against "__main __"
    r = 5
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Impuestos corporativos
Step2: Ejemplo.-- Considere un flujo de caja de \$ 100, y una duración de 10 periodos. Calcule el impuesto de renta si la tasa es del 30% para los periodos 1 a 5 y del 35% para los periodos restantes.
Step3: Inflación
Step4: Ejemplo.-- Considere un flujo de caja constante de \$ 1000. Conviertalo a dinero del periodo 0 usando una inflación inicial del 5% por periodo. La inflación cambia al 10% a partir del periodo 5.
Step5: Devaluación
Step6: Ejemplo.-- Considere un flujo de efectivo constante de \$ 1000 durante 10 periodos. Conviértalo a moneda extranjera si la tasa de cambio es de \$ 2.
Step7: Ejemplo.-- Considere el mismo caso anterior con una devaluación por periodo del 1%.
|
<ASSISTANT_TASK:>
Python Code:
import cashflows as cf
# Base cash flow: constant $1000 over 10 periods, with periods 5-9
# overridden to -90 via `spec`
cflo=cf.cashflow(const_value=1000, nper=10, spec=[(t,-90) for t in range(5,10)])
cf.cfloplot(cflo)
# Flat 30% income-tax rate for all 10 periods
tax_rate = cf.nominal_rate(const_value=[30] * 10)
x=cf.after_tax_cashflow(cflo, # cash flow
                        tax_rate = tax_rate) # income-tax rate
cf.cfloplot(x)
# Same computation, but the tax rate jumps to 35% from period 5 on
cflo = cf.cashflow(const_value=[100]*10)
tax_rate = cf.nominal_rate(const_value=[30] * 10,spec=(5,35))
x=cf.after_tax_cashflow(cflo, # cash flow
                        tax_rate = tax_rate) # income-tax rate
cf.cfloplot(x)
# Constant $1000 flow adjusted with a 5% per-period inflation rate
x = cf.cashflow(const_value=[1000]*10)
x
cf.const2curr(cflo=x,inflation=cf.nominal_rate(const_value=[5]*10))
## cash flow
x = cf.cashflow(const_value=[1000]*10)
cf.cfloplot(x)
## inflation: 5% per period, rising to 10% from period 5
inflation = cf.nominal_rate(const_value=[5]*10,spec=(5,10))
cf.cfloplot(inflation)
cf.const2curr(cflo=x,inflation=inflation)
# Compound two successive rates (1.5% then 2.0%) into one effective rate
round((1 + 0.015) * (1 + 0.020) - 1, 4)
# Convert a $1000 flow to foreign currency at a fixed exchange rate of 2
x = cf.cashflow([1000]*10)
cf.currency_conversion(cflo=x, exchange_rate = 2)
# Same conversion with a 1% devaluation per period
x = cf.cashflow([1000]*10)
cf.currency_conversion(cflo=x, exchange_rate = 2, devaluation=cf.nominal_rate(const_value=[1]*10))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Query data into Contiamo
Step2: Select data from the customers table
Step3: Select data from the transactions table
Step4: Merge tables and group by application type and age
Step5: Convert table into matrix
Step6: Display heatmap
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import contiamo
import seaborn as sns
import numpy as np
# Fetch the three datasets via the %contiamo IPython magic (query ids + tokens)
transactions = %contiamo query query:sql:48590597:411:g71GXzJjsx4Uvad11ouKjoYbQUNNPy-qRMKkBNZfyx4
customers = %contiamo query query:sql:48590597:441:MG5W2dMjXzYgsHsgdQYzmhv44dxEQX2Lodu5Uh2Hx_s
applications = %contiamo query query:sql:48590597:442:-gz3nbw1fdmtSXkD4zGNA-cVa7s6sQtRn8upCSn6uys
# NOTE(review): `applications` is fetched but never used below
# Customer attributes: age keyed by customer id
df1 = pd.DataFrame ({
        'Age' : customers['Field age'],
        'Customer id' : customers['Field customer id']
        })
# Transaction facts: app price and type keyed by customer id
df2 = pd.DataFrame ({
        'Revenue' : transactions['Field app price'],
        'Application type' : transactions['Field app type'],
        'Customer id' : transactions['Field customer id']
        })
# Join transactions to customer age, then total revenue per (app type, age)
df = df2.set_index('Customer id').join(df1.set_index('Customer id'))
df['Revenue']=pd.to_numeric(df['Revenue'])
df = df.groupby(['Application type','Age'], as_index=False).sum()
# Pivot into an (application type x age) revenue matrix for the heatmap
df = df.pivot_table('Revenue', 'Application type', 'Age', fill_value=0)
df
sns.set_context("poster") # display large charts
sns.heatmap(df, annot=True, fmt=',.0f');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Polymère
Step2: Calcul de la concentration finale
Step3: Table des valeurs
Step4: Calcul de c2
Step5: Graphique
Step6: Graphique
Step7: Graphique
Step8: Graphique
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import math
import cmath
from scipy.optimize import root
import matplotlib.pyplot as plt
%matplotlib inline
a = ("Table1.txt")
a
class InterfazPolimero:
    '''Polymer/food-simulant migration helper.

    Reads a whitespace-separated results table (one experiment per row)
    and provides methods to compute the polymer-interface concentration
    c2 and to plot it against the model parameters.
    '''
    def __init__ (self,a):
        # a: path to the data file, e.g. "Table1.txt"
        self.a=a
    def Lire(self):
        '''Read the table and expose each column as a named attribute.

        Also initializes self.c2 to a 10-element zero vector.
        Returns the full DataFrame.
        '''
        self.tab = pd.read_csv(self.a,sep=" ")
        coef =self.tab.values
        # Column order assumed by this loader -- TODO confirm against Table1.txt
        self.Experiment = coef[:,0]
        self.Thickness = coef[:,1]
        self.FoodSimulant = coef[:,2]
        self.Cpo = coef[:,3]
        self.K = coef [:,4]
        self.Dp = coef[:,5]
        self.RMSE = coef[:,6]
        self.k = coef[:,7]
        self.c4 = coef[:,8]
        # self.c1 =coef[:,9]
        self.c2 = np.zeros(10)
        return self.tab
    def inicializarC2(self):
        '''Reset c2 to a 10-element zero vector, print its shape, return it.'''
        self.c2 = np.zeros(10)
        self.dimension = np.shape(self.c2)
        print(self.dimension)
        return self.c2
    def calcul(self):
        '''Return the relative residual (j1 - j2) / j1 between two fluxes.

        j1 = Dp / (Thickness/2) * (Cpo - c2) and j2 = k * (c2/K - c4);
        calcul2 tries to drive this residual to zero.
        '''
        self.tab["j1"] = (self.tab["Dp"] / (self.tab["Thickness"] / 2)) * (self.tab["Cpo"] - self.c2)
        print(self.tab["j1"])
        self.c3 = self.c2 / self.K
        self.j2 = self.k * (self.c3 - self.tab["c4"])
        return (self.tab["j1"] - self.j2) / self.tab["j1"]
    def calcul2(self):
        '''Intended to solve for c2 row by row with scipy.optimize.root.

        NOTE(review): this method looks broken as written -- the for-loop
        targets assign into self.tab columns while iterating enumerate(tab)
        (``tab`` is undefined here), ``calcul`` and ``c2`` are referenced
        without ``self.``, and column "c" does not exist.  Left
        byte-identical pending a fix.
        '''
        i = 0
        for self.tab["Thickness"], self.tab["Dp"], self.tab["K"], self.tab["k"], self.tab["c"] in enumerate(tab):
            self.sol = root(calcul,15,args=(float(self.tab["Dp"]),float(self.tab["k"]),float(self.tab["K"]),float(self.tab["c4"]),float(self.tab["Cpo"]),float(self.tab["Thickness"])))
            c2[i]= self.sol.x
            i = i + 1
        print(self.c2)
        return self.c2
    def Garder(self):
        '''Save a hand-copied set of c2 results to the CSV file "c2rep".

        NOTE(review): the values are hard-coded rather than taken from
        self.c2 -- confirm they match the latest computation.
        '''
        raw_data ={"résultat" : [1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793]}
        df = pd.DataFrame(raw_data,index=["1","2","3","4","5","6","7","8","9","10"])
        df.to_csv("c2rep")
        return df
    def Graphique(self):
        '''Scatter-plot Cpo against Dp.'''
        plt.plot(self.tab["Dp"],self.Cpo,"^")
        plt.title("f(Dp)=Cpo")
        plt.xlabel("Dp")
        plt.ylabel("Cpo")
    def Graphique2(self):
        '''Scatter-plot the hard-coded c2 values against Dp.'''
        plt.plot(self.tab["Dp"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
        plt.title("f(Dp)=c2")
        plt.xlabel("Dp")
        plt.ylabel("c2")
    def Graphique3(self):
        '''Scatter-plot the hard-coded c2 values against Cpo.'''
        plt.plot(self.tab["Cpo"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
        plt.title("f(Cpo)=c2")
        plt.xlabel("Cpo")
        plt.ylabel("c2")
    def Graphique4(self):
        '''Scatter-plot the hard-coded c2 values against the film thickness.'''
        plt.plot(self.tab["Thickness"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
        plt.title("f(Epaisseur)=c2")
        plt.xlabel("Epaisseur")
        plt.ylabel("c2")
    def Graphique5(self):
        '''Draw the four scatter plots above in a single 2x2 figure.'''
        fig,axes=plt.subplots(2,2)
        axes[0,0].plot(self.tab["Dp"],self.Cpo,"^")
        axes[1,1].plot(self.tab["Dp"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
        axes[0,1].plot(self.tab["Cpo"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
        axes[1,0].plot(self.tab["Thickness"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
# Run the analysis end to end on the sample table
p = InterfazPolimero("Table1.txt")
p  # echo the object (notebook cell output)
p.Lire()        # load the data table into named attributes
p.calcul()      # flux-balance residual for the current c2
p.Graphique()   # individual scatter plots ...
p.Graphique2()
p.Graphique3()
p.Graphique4()
p.Graphique5()  # ... and the combined 2x2 figure
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Few examples of the handwritten words are shown below. Note that the first capitalized letter has been removed.
Step3: Define Factor Types and Build Factor Graphs
Step5: Next, we write a function to construct the factor graphs and prepare labels for training. For each factor graph instance, the structure is a chain but the number of nodes and edges depend on the number of letters, where unary factors will be added for each letter, pairwise factors will be added for each pair of neighboring letters. Besides, the first and last letter will get an additional bias factor respectively.
Step6: An example of graph structure is visualized as below, from which you may have a better sense how a factor graph being built. Note that different colors are used to represent different factor types.
Step7: Training
Step8: In Shogun, we implemented several batch solvers and online solvers. Let's first try to train the model using a batch solver. We choose the dual bundle method solver (<a href="http
Step9: Let's check the duality gap to see if the training has converged. We aim at minimizing the primal problem while maximizing the dual problem. By the weak duality theorem, the optimal value of the primal problem is always greater than or equal to dual problem. Thus, we could expect the duality gap will decrease during the time. A relative small and stable duality gap may indicate the convergence. In fact, the gap doesn't have to become zero, since we know it is not far away from the local minima.
Step10: There are other statistics that may also be helpful for checking whether the solution is good, such as the number of cutting planes, which gives a sense of how tight the piecewise lower bound is. In general, the number of cutting planes should be much smaller than the dimension of the parameter vector.
Step11: In our case, we have 101 active cutting planes, which is much less than 4082, i.e. the number of parameters. We could expect a good model by looking at these statistics. Now come to the online solvers. Unlike the cutting plane algorithms re-optimizes over all the previously added dual variables, an online solver will update the solution based on a single point. This difference results in a faster convergence rate, i.e. less oracle calls, please refer to Table 1 in [4] for more detail. Here, we use the stochastic subgradient descent (<a href="http
Step12: We compare the SGD and BMRM in terms of the primal objectives versus effective passes. We first plot the training progress (until both algorithms converge) and then zoom in to check the first 100 passes. In order to make a fair comparison, we set the regularization constant to 1e-2 for both algorithms.
Step13: As is shown above, the SGD solver uses less oracle calls to get to converge. Note that the timing is 2 times slower than they actually need, since there are additional computations of primal objective and training error in each pass. The training errors of both algorithms for each pass are shown in below.
Step15: Interestingly, the training errors of SGD solver are lower than BMRM's in first 100 passes, but in the end the BMRM solver obtains a better training performance. A probable explanation is that BMRM uses very limited number of cutting planes at beginning, which form a poor approximation of the objective function. As the number of cutting planes increasing, we got a tighter piecewise lower bound, thus improve the performance. In addition, we would like to show the pairwise weights, which may learn important co-occurrances of letters. The hinton diagram is a wonderful tool for visualizing 2D data, in which positive and negative values are represented by white and black squares, respectively, and the size of each square represents the magnitude of each value. In our case, a smaller number i.e. a large black square indicates the two letters tend to coincide.
Step16: Inference
Step17: Evaluation
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
%matplotlib inline
import numpy as np
import scipy.io
# Load Taskar's OCR dataset of handwritten words
dataset = scipy.io.loadmat('../../../data/ocr/ocr_taskar.mat')
# patterns for training
p_tr = dataset['patterns_train']
# patterns for testing
p_ts = dataset['patterns_test']
# labels for training
l_tr = dataset['labels_train']
# labels for testing
l_ts = dataset['labels_test']
# feature dimension (per-letter feature vector length)
n_dims = p_tr[0,0].shape[0]
# number of states (26 letters)
n_stats = 26
# number of training samples
n_tr_samples = p_tr.shape[1]
# number of testing samples
n_ts_samples = p_ts.shape[1]
import matplotlib.pyplot as plt
def show_word(patterns, index):
    '''Show a word with padding.

    patterns -- cell array of words; patterns[0, index] is a
                (features x letters) matrix whose first 128 rows hold the
                16x8 binary letter images
    index    -- which word to display

    (Docstring quoting restored -- the summary line had lost its quotes.)
    '''
    plt.rc('image', cmap='binary')
    letters = patterns[0,index][:128,:]
    n_letters = letters.shape[1]
    for l in xrange(n_letters):
        # reshape the 128-vector into a 16x8 image and pad with a zero border
        lett = np.transpose(np.reshape(letters[:,l], (8,16)))
        lett = np.hstack((np.zeros((16,1)), lett, np.zeros((16,1))))
        lett = np.vstack((np.zeros((1,10)), lett, np.zeros((1,10))))
        # NOTE(review): bare subplot/imshow rely on the %pylab global namespace
        subplot(1,n_letters,l+1)
        imshow(lett)
        plt.xticks(())
        plt.yticks(())
    plt.tight_layout()
show_word(p_tr, 471)
show_word(p_tr, 57)
from modshogun import TableFactorType
# unary, type_id = 0
cards_u = np.array([n_stats], np.int32)
w_gt_u = np.zeros(n_stats*n_dims)
fac_type_u = TableFactorType(0, cards_u, w_gt_u)
# pairwise, type_id = 1
cards = np.array([n_stats,n_stats], np.int32)
w_gt = np.zeros(n_stats*n_stats)
fac_type = TableFactorType(1, cards, w_gt)
# first bias, type_id = 2
cards_s = np.array([n_stats], np.int32)
w_gt_s = np.zeros(n_stats)
fac_type_s = TableFactorType(2, cards_s, w_gt_s)
# last bias, type_id = 3
cards_t = np.array([n_stats], np.int32)
w_gt_t = np.zeros(n_stats)
fac_type_t = TableFactorType(3, cards_t, w_gt_t)
# all initial parameters
w_all = [w_gt_u,w_gt,w_gt_s,w_gt_t]
# all factor types
ftype_all = [fac_type_u,fac_type,fac_type_s,fac_type_t]
def prepare_data(x, y, ftype, num_samples):
    '''Prepare FactorGraphFeatures and FactorGraphLabels.

    x           -- cell array of word patterns; x[0, i] is a
                   (n_dims x n_letters) feature matrix for word i
    y           -- cell array of ground-truth letter states per word
    ftype       -- the four factor types [unary, pairwise, first-bias, last-bias]
    num_samples -- number of (factor graph, labeling) pairs to build

    Returns (samples, labels) for a FactorGraphModel.  Uses the
    module-level ``n_stats`` (number of letter states).
    (Docstring quoting restored -- the summary line had lost its quotes.)
    '''
    from modshogun import Factor, TableFactorType, FactorGraph
    from modshogun import FactorGraphObservation, FactorGraphLabels, FactorGraphFeatures
    samples = FactorGraphFeatures(num_samples)
    labels = FactorGraphLabels(num_samples)
    for i in xrange(num_samples):
        n_vars = x[0,i].shape[1]
        data = x[0,i].astype(np.float64)
        vc = np.array([n_stats]*n_vars, np.int32)
        fg = FactorGraph(vc)
        # add unary factors (one per letter, carrying that letter's features)
        for v in xrange(n_vars):
            datau = data[:,v]
            vindu = np.array([v], np.int32)
            facu = Factor(ftype[0], vindu, datau)
            fg.add_factor(facu)
        # add pairwise factors (one per pair of neighboring letters)
        for e in xrange(n_vars-1):
            datap = np.array([1.0])
            vindp = np.array([e,e+1], np.int32)
            facp = Factor(ftype[1], vindp, datap)
            fg.add_factor(facp)
        # add bias factor to first letter
        datas = np.array([1.0])
        vinds = np.array([0], np.int32)
        facs = Factor(ftype[2], vinds, datas)
        fg.add_factor(facs)
        # add bias factor to last letter
        datat = np.array([1.0])
        vindt = np.array([n_vars-1], np.int32)
        fact = Factor(ftype[3], vindt, datat)
        fg.add_factor(fact)
        # add factor graph
        samples.add_sample(fg)
        # add corresponding label
        states_gt = y[0,i].astype(np.int32)
        states_gt = states_gt[0,:]; # mat to vector
        # per-letter weights sum to 1 so words of different lengths
        # contribute equally to the structured loss
        loss_weights = np.array([1.0/n_vars]*n_vars)
        fg_obs = FactorGraphObservation(states_gt, loss_weights)
        labels.add_label(fg_obs)
    return samples, labels
# prepare training pairs (factor graph, node states)
n_tr_samples = 350 # choose a subset of training data to avoid time out on buildbot
samples, labels = prepare_data(p_tr, l_tr, ftype_all, n_tr_samples)
try:
import networkx as nx # pip install networkx
except ImportError:
import pip
pip.main(['install', '--user', 'networkx'])
import networkx as nx
import matplotlib.pyplot as plt
# create a graph
G = nx.Graph()
node_pos = {}
# add variable nodes, assuming there are 3 letters
G.add_nodes_from(['v0','v1','v2'])
for i in xrange(3):
node_pos['v%d' % i] = (2*i,1)
# add factor nodes
G.add_nodes_from(['F0','F1','F2','F01','F12','Fs','Ft'])
for i in xrange(3):
node_pos['F%d' % i] = (2*i,1.006)
for i in xrange(2):
node_pos['F%d%d' % (i,i+1)] = (2*i+1,1)
node_pos['Fs'] = (-1,1)
node_pos['Ft'] = (5,1)
# add edges to connect variable nodes and factor nodes
G.add_edges_from([('v%d' % i,'F%d' % i) for i in xrange(3)])
G.add_edges_from([('v%d' % i,'F%d%d' % (i,i+1)) for i in xrange(2)])
G.add_edges_from([('v%d' % (i+1),'F%d%d' % (i,i+1)) for i in xrange(2)])
G.add_edges_from([('v0','Fs'),('v2','Ft')])
# draw graph
fig, ax = plt.subplots(figsize=(6,2))
nx.draw_networkx_nodes(G,node_pos,nodelist=['v0','v1','v2'],node_color='white',node_size=700,ax=ax)
nx.draw_networkx_nodes(G,node_pos,nodelist=['F0','F1','F2'],node_color='yellow',node_shape='s',node_size=300,ax=ax)
nx.draw_networkx_nodes(G,node_pos,nodelist=['F01','F12'],node_color='blue',node_shape='s',node_size=300,ax=ax)
nx.draw_networkx_nodes(G,node_pos,nodelist=['Fs'],node_color='green',node_shape='s',node_size=300,ax=ax)
nx.draw_networkx_nodes(G,node_pos,nodelist=['Ft'],node_color='purple',node_shape='s',node_size=300,ax=ax)
nx.draw_networkx_edges(G,node_pos,alpha=0.7)
plt.axis('off')
plt.tight_layout()
from modshogun import FactorGraphModel, TREE_MAX_PROD
# create model and register factor types
model = FactorGraphModel(samples, labels, TREE_MAX_PROD)
model.add_factor_type(ftype_all[0])
model.add_factor_type(ftype_all[1])
model.add_factor_type(ftype_all[2])
model.add_factor_type(ftype_all[3])
from modshogun import DualLibQPBMSOSVM
from modshogun import BmrmStatistics
import pickle
import time
# create bundle method SOSVM, there are few variants can be chosen
# BMRM, Proximal Point BMRM, Proximal Point P-BMRM, NCBM
# usually the default one i.e. BMRM is good enough
# lambda is set to 1e-2
bmrm = DualLibQPBMSOSVM(model, labels, 0.01)
bmrm.set_TolAbs(20.0)
bmrm.set_verbose(True)
bmrm.set_store_train_info(True)
# train
t0 = time.time()
bmrm.train()
t1 = time.time()
w_bmrm = bmrm.get_w()
print "BMRM took", t1 - t0, "seconds."
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,4))
primal_bmrm = bmrm.get_helper().get_primal_values()
dual_bmrm = bmrm.get_result().get_hist_Fd_vector()
len_iter = min(primal_bmrm.size, dual_bmrm.size)
primal_bmrm = primal_bmrm[1:len_iter]
dual_bmrm = dual_bmrm[1:len_iter]
# plot duality gaps
xs = range(dual_bmrm.size)
axes[0].plot(xs, (primal_bmrm-dual_bmrm), label='duality gap')
axes[0].set_xlabel('iteration')
axes[0].set_ylabel('duality gap')
axes[0].legend(loc=1)
axes[0].set_title('duality gaps');
axes[0].grid(True)
# plot primal and dual values
xs = range(dual_bmrm.size-1)
axes[1].plot(xs, primal_bmrm[1:], label='primal')
axes[1].plot(xs, dual_bmrm[1:], label='dual')
axes[1].set_xlabel('iteration')
axes[1].set_ylabel('objective')
axes[1].legend(loc=1)
axes[1].set_title('primal vs dual');
axes[1].grid(True)
# statistics
bmrm_stats = bmrm.get_result()
nCP = bmrm_stats.nCP
nzA = bmrm_stats.nzA
print 'number of cutting planes: %d' % nCP
print 'number of active cutting planes: %d' % nzA
from modshogun import StochasticSOSVM
# the 3rd parameter is do_weighted_averaging, by turning this on,
# a possibly faster convergence rate may be achieved.
# the 4th parameter controls outputs of verbose training information
sgd = StochasticSOSVM(model, labels, True, True)
sgd.set_num_iter(100)
sgd.set_lambda(0.01)
# train
t0 = time.time()
sgd.train()
t1 = time.time()
w_sgd = sgd.get_w()
print "SGD took", t1 - t0, "seconds."
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,4))
primal_sgd = sgd.get_helper().get_primal_values()
xs = range(dual_bmrm.size-1)
axes[0].plot(xs, primal_bmrm[1:], label='BMRM')
axes[0].plot(range(99), primal_sgd[1:100], label='SGD')
axes[0].set_xlabel('effecitve passes')
axes[0].set_ylabel('primal objective')
axes[0].set_title('whole training progress')
axes[0].legend(loc=1)
axes[0].grid(True)
axes[1].plot(range(99), primal_bmrm[1:100], label='BMRM')
axes[1].plot(range(99), primal_sgd[1:100], label='SGD')
axes[1].set_xlabel('effecitve passes')
axes[1].set_ylabel('primal objective')
axes[1].set_title('first 100 effective passes')
axes[1].legend(loc=1)
axes[1].grid(True)
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,4))
terr_bmrm = bmrm.get_helper().get_train_errors()
terr_sgd = sgd.get_helper().get_train_errors()
xs = range(terr_bmrm.size-1)
axes[0].plot(xs, terr_bmrm[1:], label='BMRM')
axes[0].plot(range(99), terr_sgd[1:100], label='SGD')
axes[0].set_xlabel('effecitve passes')
axes[0].set_ylabel('training error')
axes[0].set_title('whole training progress')
axes[0].legend(loc=1)
axes[0].grid(True)
axes[1].plot(range(99), terr_bmrm[1:100], label='BMRM')
axes[1].plot(range(99), terr_sgd[1:100], label='SGD')
axes[1].set_xlabel('effecitve passes')
axes[1].set_ylabel('training error')
axes[1].set_title('first 100 effective passes')
axes[1].legend(loc=1)
axes[1].grid(True)
def hinton(matrix, max_weight=None, ax=None):
    """Draw Hinton diagram for visualizing a weight matrix.

    Each entry of *matrix* is drawn as a square whose area encodes the
    weight's magnitude and whose color encodes its sign (white = positive,
    black = negative). Draws into *ax* if given, else the current axes.
    """
    ax = ax if ax is not None else plt.gca()

    if not max_weight:
        # Round the largest magnitude up to the next power of two.
        # NOTE(review): max_weight is computed but never used below — kept
        # for compatibility with the original matplotlib demo this is based on.
        max_weight = 2**np.ceil(np.log(np.abs(matrix).max())/np.log(2))

    ax.patch.set_facecolor('gray')
    ax.set_aspect('equal', 'box')
    ax.xaxis.set_major_locator(plt.NullLocator())
    ax.yaxis.set_major_locator(plt.NullLocator())

    for (x,y),w in np.ndenumerate(matrix):
        color = 'white' if w > 0 else 'black'
        size = np.sqrt(np.abs(w))
        rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
                             facecolor=color, edgecolor=color)
        ax.add_patch(rect)

    ax.autoscale_view()
    ax.invert_yaxis()
# get pairwise parameters, also accessible from
# w[n_dims*n_stats:n_dims*n_stats+n_stats*n_stats]
model.w_to_fparams(w_sgd) # update factor parameters
w_p = ftype_all[1].get_w()
w_p = np.reshape(w_p,(n_stats,n_stats))
hinton(w_p)
# get testing data
samples_ts, labels_ts = prepare_data(p_ts, l_ts, ftype_all, n_ts_samples)
from modshogun import FactorGraphFeatures, FactorGraphObservation, TREE_MAX_PROD, MAPInference
# get a factor graph instance from test data
fg0 = samples_ts.get_sample(100)
fg0.compute_energies()
fg0.connect_components()
# create a MAP inference using tree max-product
infer_met = MAPInference(fg0, TREE_MAX_PROD)
infer_met.inference()
# get inference results
y_pred = infer_met.get_structured_outputs()
y_truth = FactorGraphObservation.obtain_from_generic(labels_ts.get_label(100))
print y_pred.get_data()
print y_truth.get_data()
from modshogun import LabelsFactory, SOSVMHelper
# training error of BMRM method
bmrm.set_w(w_bmrm)
model.w_to_fparams(w_bmrm)
lbs_bmrm = bmrm.apply()
acc_loss = 0.0
ave_loss = 0.0
for i in xrange(n_tr_samples):
y_pred = lbs_bmrm.get_label(i)
y_truth = labels.get_label(i)
acc_loss = acc_loss + model.delta_loss(y_truth, y_pred)
ave_loss = acc_loss / n_tr_samples
print('BMRM: Average training error is %.4f' % ave_loss)
# training error of stochastic method
print('SGD: Average training error is %.4f' % SOSVMHelper.average_loss(w_sgd, model))
# testing error
bmrm.set_features(samples_ts)
bmrm.set_labels(labels_ts)
lbs_bmrm_ts = bmrm.apply()
acc_loss = 0.0
ave_loss_ts = 0.0
for i in xrange(n_ts_samples):
y_pred = lbs_bmrm_ts.get_label(i)
y_truth = labels_ts.get_label(i)
acc_loss = acc_loss + model.delta_loss(y_truth, y_pred)
ave_loss_ts = acc_loss / n_ts_samples
print('BMRM: Average testing error is %.4f' % ave_loss_ts)
# testing error of stochastic method
print('SGD: Average testing error is %.4f' % SOSVMHelper.average_loss(sgd.get_w(), model))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step8: Parsing the Dataset
Step9: Loading the Dataset
Step10: Training the Models
Step11: Evaluation Using Coherence
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import os
import re
from gensim.corpora import TextCorpus, MmCorpus
from gensim import utils, models
from gensim.parsing.preprocessing import STOPWORDS
from gensim.utils import deaccent
class TextDirectoryCorpus(TextCorpus):
    """Read documents recursively from a directory,
    where each file is interpreted as a plain text document.
    """

    def iter_filepaths(self):
        """Lazily yield the path of every file found anywhere below
        ``self.input`` in the directory tree.
        """
        for dirpath, dirnames, filenames in os.walk(self.input):
            for name in filenames:
                yield os.path.join(dirpath, name)

    def getstream(self):
        # Yield the raw text content of each file, one document at a time.
        # The file is closed before its content is yielded.
        for path in self.iter_filepaths():
            with utils.smart_open(path) as f:
                doc_content = f.read()
            yield doc_content

    def preprocess_text(self, text):
        # Normalize (collapse whitespace, lowercase/ASCII-decode, deaccent),
        # tokenize, then drop stopwords and very short tokens.
        text = deaccent(
            lower_to_unicode(
                strip_multiple_whitespaces(text)))
        tokens = simple_tokenize(text)
        return remove_short(
            remove_stopwords(tokens))

    def get_texts(self):
        """Iterate over the collection, yielding one document at a time. A document
        is a sequence of words (strings) that can be fed into `Dictionary.doc2bow`.
        Override this function to match your input (parse input files, do any
        text preprocessing, lowercasing, tokenizing etc.). There will be no further
        preprocessing of the words coming out of this function.
        """
        lines = self.getstream()
        if self.metadata:
            for lineno, line in enumerate(lines):
                yield self.preprocess_text(line), (lineno,)
        else:
            for line in lines:
                yield self.preprocess_text(line)
def remove_stopwords(tokens, stopwords=STOPWORDS):
    """Return a list of the tokens that do not appear in *stopwords*."""
    return [tok for tok in tokens if tok not in stopwords]
def remove_short(tokens, minsize=3):
    """Return a list of the tokens that are at least *minsize* characters long."""
    return list(filter(lambda tok: len(tok) >= minsize, tokens))
def lower_to_unicode(text):
    """Lowercase *text* and decode it to unicode, silently dropping non-ASCII bytes."""
    lowered = text.lower()
    return utils.to_unicode(lowered, 'ascii', 'ignore')
# Matches any run of one or more whitespace characters.
RE_WHITESPACE = re.compile(r"(\s)+", re.UNICODE)

def strip_multiple_whitespaces(text):
    """Collapse each run of consecutive whitespace in *text* into a single space."""
    collapsed = RE_WHITESPACE.sub(" ", text)
    return collapsed
# Matches maximal runs of word characters that contain no digits.
PAT_ALPHABETIC = re.compile('(((?![\d])\w)+)', re.UNICODE)

def simple_tokenize(text):
    """Yield each maximal run of non-digit word characters found in *text*."""
    for m in PAT_ALPHABETIC.finditer(text):
        yield m.group()
class NewsgroupCorpus(TextDirectoryCorpus):
    """Parse 20 Newsgroups dataset."""

    def extract_body(self, text):
        # Remove quoted reply lines, then the signature footer, then the header.
        return strip_newsgroup_header(
            strip_newsgroup_footer(
                strip_newsgroup_quoting(text)))

    def preprocess_text(self, text):
        # Reduce each post to its body before the generic text preprocessing.
        body = self.extract_body(text)
        return super(NewsgroupCorpus, self).preprocess_text(body)
def strip_newsgroup_header(text):
    """Given text in "news" format, strip the headers, by removing everything
    before the first blank line.
    """
    # If there is no blank line, partition yields ('', ) for `after`,
    # so the whole message is treated as header and '' is returned.
    _before, _blankline, after = text.partition('\n\n')
    return after
# Lines that either start with a quote marker (>, |, "In article", "Quoted
# from") or contain a phrase that introduces a quoted section.
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
                       r'|^In article|^Quoted from|^\||^>)')

def strip_newsgroup_quoting(text):
    """Given text in "news" format, strip lines beginning with the quote
    characters > or |, plus lines that often introduce a quoted section
    (for example, because they contain the string 'writes:'.)
    """
    good_lines = [line for line in text.split('\n')
                  if not _QUOTE_RE.search(line)]
    return '\n'.join(good_lines)
_PGP_SIG_BEGIN = "-----BEGIN PGP SIGNATURE-----"

def strip_newsgroup_footer(text):
    """Given text in "news" format, attempt to remove a signature block."""
    # Everything from the PGP signature marker onward is dropped; posts
    # without a marker are returned unchanged.
    try:
        return text[:text.index(_PGP_SIG_BEGIN)]
    except ValueError:
        return text
# Replace data_path with path to your own copy of the corpus.
# You can download it from here: http://qwone.com/~jason/20Newsgroups/
# I'm using the original, called: 20news-19997.tar.gz
home = os.path.expanduser('~')
data_dir = os.path.join(home, 'workshop', 'nlp', 'data')
data_path = os.path.join(data_dir, '20_newsgroups')
%%time
corpus = NewsgroupCorpus(data_path)
dictionary = corpus.dictionary
print(len(corpus))
print(dictionary)
%%time
mm_path = os.path.join(data_dir, '20_newsgroups.mm')
MmCorpus.serialize(mm_path, corpus, id2word=dictionary)
mm_corpus = MmCorpus(mm_path) # load back in to use for LDA training
%%time
trained_models = {}
for num_topics in range(20, 101, 10):
print("Training LDA(k=%d)" % num_topics)
lda = models.LdaMulticore(
mm_corpus, id2word=dictionary, num_topics=num_topics, workers=4,
passes=10, iterations=200, random_state=42,
alpha='asymmetric', # shown to be better than symmetric in most cases
decay=0.5, offset=64 # best params from Hoffman paper
)
trained_models[num_topics] = lda
# Build topic listings from each model.
import itertools
from gensim import matutils
def top_topics(lda, num_words=20):
    """Return each topic as a list of its *num_words* highest-weight terms.

    Reads the raw topic-term weights from ``lda.state.get_lambda()`` (one
    row per topic) and maps term ids to strings via ``lda.id2word``.
    """
    str_topics = []
    for topic in lda.state.get_lambda():
        topic = topic / topic.sum() # normalize to probability distribution
        bestn = matutils.argsort(topic, topn=num_words, reverse=True)
        beststr = [lda.id2word[_id] for _id in bestn]
        str_topics.append(beststr)
    return str_topics
model_topics = {}
super_topic = set()
for num_topics, model in trained_models.items():
topics_as_topn_terms = top_topics(model)
model_topics[num_topics] = topics_as_topn_terms
super_topic.update(itertools.chain.from_iterable(topics_as_topn_terms))
print("Number of relevant terms: %d" % len(super_topic))
%%time
# Now estimate the probabilities for the CoherenceModel
cm = models.CoherenceModel(
topics=[super_topic], texts=corpus.get_texts(),
dictionary=dictionary, coherence='c_v')
cm.estimate_probabilities()
%%time
import numpy as np
# Next we perform the coherence evaluation for each of the models.
# Since we have already precomputed the probabilities, this simply
# involves using the accumulated stats in the `CoherenceModel` to
# perform the evaluations, which should be pretty quick.
coherences = {}
for num_topics, topics in model_topics.items():
cm.topics = topics
# We evaluate at various values of N and average them. This is a more robust,
# according to: http://people.eng.unimelb.edu.au/tbaldwin/pubs/naacl2016.pdf
coherence_at_n = {}
for n in (20, 15, 10, 5):
cm.topn = n
topic_coherences = cm.get_coherence_per_topic()
# Let's record the coherences for each topic, as well as the aggregated
# coherence across all of the topics.
coherence_at_n[n] = (topic_coherences, cm.aggregate_measures(topic_coherences))
topic_coherences, avg_coherences = zip(*coherence_at_n.values())
avg_topic_coherences = np.vstack(topic_coherences).mean(0)
avg_coherence = np.mean(avg_coherences)
print("Avg coherence for num_topics=%d: %.5f" % (num_topics, avg_coherence))
coherences[num_topics] = (avg_topic_coherences, avg_coherence)
# Print the coherence rankings
avg_coherence = \
[(num_topics, avg_coherence)
for num_topics, (_, avg_coherence) in coherences.items()]
ranked = sorted(avg_coherence, key=lambda tup: tup[1], reverse=True)
print("Ranked by average '%s' coherence:\n" % cm.coherence)
for item in ranked:
print("num_topics=%d:\t%.4f" % item)
print("\nBest: %d" % ranked[0][0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question
Step2: Smoothed particle hydrodynamics
Step3: Applications
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import math
from numba import jit
N = 10000
x = np.random.randn(N, 2);
y = np.random.randn(N, 2);
charges = np.ones(N)
res = np.zeros(N)
@jit
def compute_nbody_direct(N, x, y, charges, res):
    """Direct O(N^2) n-body summation.

    Fills res[i] with sum over j of charges[j] / |x[i] - y[j]| for the
    2-D points in x and y.
    """
    for i in xrange(N):
        res[i] = 0.0
        for j in xrange(N):
            # BUG FIX: the pairwise distance must use y[j], not y[i] —
            # with y[i] every term of the inner sum shared one distance.
            dist = (x[i, 0] - y[j, 0]) ** 2 + (x[i, 1] - y[j, 1]) ** 2
            dist = math.sqrt(dist)
            res[i] += charges[j] / dist
%timeit compute_nbody_direct(N, x, y, charges, res)
from IPython.display import YouTubeVideo
YouTubeVideo('UC5pDPY5Nz4')
from IPython.display import YouTubeVideo
YouTubeVideo('6bdIHFTfTdU')
from IPython.core.display import HTML
def css_styling():
    """Load the notebook's custom CSS file and return it as renderable HTML."""
    # Use a context manager so the file handle is closed deterministically
    # (the original left the handle open).
    with open("./styles/alex.css", "r") as css_file:
        styles = css_file.read()
    return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unfortunately, when you go to execute the code block, Python throws an error. Some friend! How can we fix the code so that it runs correctly?
Step2: <div class="alert alert-danger">
Step3: Using print Statements
Step4: This runs the code to completion, resulting in the same error we saw earlier. However, because we placed the print statement in our code immediately before the error occurred, we see that IPython also printed the contents of the axis object above the traceback. Thus, print statements are an alternative means of checking the values of local variables without using the IPython debugger. Just remember to remove the print statements before validating your code!
|
<ASSISTANT_TASK:>
Python Code:
# for inline plotting in the notebook
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
def plot_log():
    # NOTE(review): intentionally buggy teaching example — plt.subplots(2, 1)
    # returns an *array* of two Axes, so calling axis.plot(...) below raises
    # AttributeError. The surrounding text uses this to demonstrate debugging.
    figure, axis = plt.subplots(2, 1)
    x = np.linspace(1, 2, 10)
    axis.plot(x, np.log(x))
    plt.show()
plot_log() # Call the function, generate plot
# Uncomment the following line and run the cell to debug the previous function:
#%debug
def plot_log():
    """Plot log(x) over 1 <= x <= 2 on a single-axes figure."""
    fig, ax = plt.subplots()
    sample_points = np.linspace(1, 2, 10)
    ax.plot(sample_points, np.log(sample_points))
    plt.show()
plot_log() # Call the function, generate plot
def plot_log():
    # Deliberately failing debugging demo: subplots(2,1) yields an ndarray of
    # Axes, and the print below exposes its contents just before the call
    # that raises AttributeError.
    figure, axis = plt.subplots(2,1)
    x = np.linspace(1, 2, 10)
    print(axis)
    axis.plot(x, np.log(x))
    plt.show()
plot_log() # Call the function, generate plot
?plt.subplots
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setting up a Cloud Service
Step2: Create Storage Account
Step3: Working with Containers
Step4: Working with Blobs
Step5: Cleaning Up
Step6: Miscellaneous
|
<ASSISTANT_TASK:>
Python Code:
# standard library
import os
import time
import shutil
# Load Python SDK
from azure import *
from azure.servicemanagement import *
from azure.storage import *
# Subscription details
subscription_id = '1a61650c-ada5-4173-a8da-2a4ffcfab747'
certificate_path = 'mycert.pem'
# Initialize connection
sms = ServiceManagementService(subscription_id, certificate_path)
cs_name, cs_label = 'softEcon', 'softEcon'
cs_desc, cs_location = 'Cloud Service for this lecture.', 'West US'
sms.create_hosted_service(cs_name, cs_label, cs_desc, cs_location)
sa_name, sa_label = 'lecture2', 'lecture2'
sa_desc, sa_location = 'Storage Account for this lecture.', 'West US'
sms.create_storage_account(sa_name, sa_desc, sa_label, location=sa_location)
# Get key
sa_key = None
while sa_key is None:
try:
sa_key = sms.get_storage_account_keys(sa_name).storage_service_keys.primary
except WindowsAzureMissingResourceError:
pass
time.sleep(5)
#Initialize connection to storate account
blob_service = BlobService(sa_name, sa_key)
# Specify container
cn_names = ['movies', 'pictures']
# Create containers
for cn_name in cn_names:
# Public
blob_service.create_container(cn_name, x_ms_blob_public_access='container')
# Create directory structure
DOWNLOAD_DIRECTORY, UPLOAD_DIRECTORY = 'downloads/', 'uploads/'
os.mkdir(UPLOAD_DIRECTORY), os.mkdir(DOWNLOAD_DIRECTORY)
# Create some data
bb_names = ['mov1.avi', 'img1.jpg', 'img2.jpg']
for bb_name in bb_names:
file_name = UPLOAD_DIRECTORY + bb_name
with open(file_name, 'w') as file_:
file_.write('This is some data.')
# Select container.
def get_container(bb_name):
    """Get appropriate container for file.

    Files whose name contains 'avi' map to the 'movies' container and
    those containing 'jpg' map to 'pictures'; anything else raises
    AssertionError.
    """
    # Sort by file extension
    if 'avi' in bb_name:
        cn_name = 'movies'
    elif 'jpg' in bb_name:
        cn_name = 'pictures'
    else:
        raise AssertionError
    # Finishing
    return cn_name
# Upload Blob into container
for bb_name in bb_names:
# Select container
cn_name = get_container(bb_name)
# Upload file to container
file_path = UPLOAD_DIRECTORY + bb_name
blob_service.put_block_blob_from_path(cn_name, bb_name, file_path)
# List content in container
blobs = blob_service.list_blobs(cn_name)
# Download Blob from container
for bb_name in bb_names:
# Select container
cn_name = get_container(bb_name)
# Construct file name
file_path = DOWNLOAD_DIRECTORY + bb_name
# Download Blob
blob_service.get_blob_to_path(cn_name, bb_name, file_path)
# Delete Blobs
for bb_name in bb_names:
cn_name = get_container(bb_name)
blob_service.delete_blob(cn_name, bb_name)
# Deleting container
for cn_name in cn_names:
blob_service.delete_container(cn_name)
# Delete storage account
sms.delete_storage_account(sa_name)
# Delete cloud service
sms.delete_hosted_service(cs_name)
# Delete directories
shutil.rmtree(UPLOAD_DIRECTORY), shutil.rmtree(DOWNLOAD_DIRECTORY)
import urllib; from IPython.core.display import HTML
HTML(urllib.urlopen('http://bit.ly/1K5apRH').read())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fully-Connected Neural Nets
Step4: Affine layer
Step5: Affine layer
Step6: ReLU layer
Step7: ReLU layer
Step8: "Sandwich" layers
Step9: Loss layers
Step10: Two-layer network
Step11: Solver
Step12: Multilayer network
Step13: As another sanity check, make sure you can overfit a small dataset of 50 images. First we will try a three-layer network with 100 units in each hidden layer. You will need to tweak the learning rate and initialization scale, but you should be able to overfit and achieve 100% training accuracy within 20 epochs.
Step14: Now try to use a five-layer network with 100 units on each layer to overfit 50 training examples. Again you will have to adjust the learning rate and weight initialization, but you should be able to achieve 100% training accuracy within 20 epochs.
Step15: <h3> Not getting the 100% accuracy even after trying a lot of learning rates and weights</h3>
Step16: Inline question
Step17: Once you have done so, run the following to train a six-layer network with both SGD and SGD+momentum. You should see the SGD+momentum update rule converge faster.
Step18: RMSProp and Adam
Step19: Once you have debugged your RMSProp and Adam implementations, run the following to train a pair of deep networks using these new update rules
Step20: Train a good model!
Step21: Test your model
|
<ASSISTANT_TASK:>
Python Code:
# As usual, a bit of setup
import time
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.fc_net import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """Return the maximum elementwise relative error between arrays x and y.

    Each element's difference is normalized by the summed magnitudes
    |x| + |y|, floored at 1e-8 to avoid division by zero.
    """
    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in data.iteritems():
print '%s: ' % k, v.shape
# Test the affine_forward function
num_inputs = 2
input_shape = (4, 5, 6)
output_dim = 3
input_size = num_inputs * np.prod(input_shape)
weight_size = output_dim * np.prod(input_shape)
x = np.linspace(-0.1, 0.5, num=input_size).reshape(num_inputs, *input_shape)
w = np.linspace(-0.2, 0.3, num=weight_size).reshape(np.prod(input_shape), output_dim)
b = np.linspace(-0.3, 0.1, num=output_dim)
out, _ = affine_forward(x, w, b)
correct_out = np.array([[ 1.49834967, 1.70660132, 1.91485297],
[ 3.25553199, 3.5141327, 3.77273342]])
# Compare your output with ours. The error should be around 1e-9.
print 'Testing affine_forward function:'
print 'difference: ', rel_error(out, correct_out)
# Test the affine_backward function
x = np.random.randn(10, 2, 3)
w = np.random.randn(6, 5)
b = np.random.randn(5)
dout = np.random.randn(10, 5)
dx_num = eval_numerical_gradient_array(lambda x: affine_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_forward(x, w, b)[0], b, dout)
_, cache = affine_forward(x, w, b)
dx, dw, db = affine_backward(dout, cache)
# The error should be around 1e-10
print 'Testing affine_backward function:'
print 'dx error: ', rel_error(dx_num, dx)
print 'dw error: ', rel_error(dw_num, dw)
print 'db error: ', rel_error(db_num, db)
# Test the relu_forward function
x = np.linspace(-0.5, 0.5, num=12).reshape(3, 4)
out, _ = relu_forward(x)
correct_out = np.array([[ 0., 0., 0., 0., ],
[ 0., 0., 0.04545455, 0.13636364,],
[ 0.22727273, 0.31818182, 0.40909091, 0.5, ]])
# Compare your output with ours. The error should be around 1e-8
print 'Testing relu_forward function:'
print 'difference: ', rel_error(out, correct_out)
x = np.random.randn(10, 10)
dout = np.random.randn(*x.shape)
dx_num = eval_numerical_gradient_array(lambda x: relu_forward(x)[0], x, dout)
_, cache = relu_forward(x)
dx = relu_backward(dout, cache)
# The error should be around 1e-12
print 'Testing relu_backward function:'
print 'dx error: ', rel_error(dx_num, dx)
from cs231n.layer_utils import affine_relu_forward, affine_relu_backward
x = np.random.randn(2, 3, 4)
w = np.random.randn(12, 10)
b = np.random.randn(10)
dout = np.random.randn(2, 10)
out, cache = affine_relu_forward(x, w, b)
dx, dw, db = affine_relu_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: affine_relu_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_relu_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_relu_forward(x, w, b)[0], b, dout)
print 'Testing affine_relu_forward:'
print 'dx error: ', rel_error(dx_num, dx)
print 'dw error: ', rel_error(dw_num, dw)
print 'db error: ', rel_error(db_num, db)
num_classes, num_inputs = 10, 50
x = 0.001 * np.random.randn(num_inputs, num_classes)
y = np.random.randint(num_classes, size=num_inputs)
dx_num = eval_numerical_gradient(lambda x: svm_loss(x, y)[0], x, verbose=False)
loss, dx = svm_loss(x, y)
# Test svm_loss function. Loss should be around 9 and dx error should be 1e-9
print 'Testing svm_loss:'
print 'loss: ', loss
print 'dx error: ', rel_error(dx_num, dx)
dx_num = eval_numerical_gradient(lambda x: softmax_loss(x, y)[0], x, verbose=False)
loss, dx = softmax_loss(x, y)
# Test softmax_loss function. Loss should be 2.3 and dx error should be 1e-8
print '\nTesting softmax_loss:'
print 'loss: ', loss
print 'dx error: ', rel_error(dx_num, dx)
N, D, H, C = 3, 5, 50, 7
X = np.random.randn(N, D)
y = np.random.randint(C, size=N)
std = 1e-2
model = TwoLayerNet(input_dim=D, hidden_dim=H, num_classes=C, weight_scale=std)
print 'Testing initialization ... '
W1_std = abs(model.params['W1'].std() - std)
b1 = model.params['b1']
W2_std = abs(model.params['W2'].std() - std)
b2 = model.params['b2']
assert W1_std < std / 10, 'First layer weights do not seem right'
assert np.all(b1 == 0), 'First layer biases do not seem right'
assert W2_std < std / 10, 'Second layer weights do not seem right'
assert np.all(b2 == 0), 'Second layer biases do not seem right'
print 'Testing test-time forward pass ... '
model.params['W1'] = np.linspace(-0.7, 0.3, num=D*H).reshape(D, H)
model.params['b1'] = np.linspace(-0.1, 0.9, num=H)
model.params['W2'] = np.linspace(-0.3, 0.4, num=H*C).reshape(H, C)
model.params['b2'] = np.linspace(-0.9, 0.1, num=C)
X = np.linspace(-5.5, 4.5, num=N*D).reshape(D, N).T
scores = model.loss(X)
correct_scores = np.asarray(
[[11.53165108, 12.2917344, 13.05181771, 13.81190102, 14.57198434, 15.33206765, 16.09215096],
[12.05769098, 12.74614105, 13.43459113, 14.1230412, 14.81149128, 15.49994135, 16.18839143],
[12.58373087, 13.20054771, 13.81736455, 14.43418138, 15.05099822, 15.66781506, 16.2846319 ]])
scores_diff = np.abs(scores - correct_scores).sum()
assert scores_diff < 1e-6, 'Problem with test-time forward pass'
print 'Testing training loss (no regularization)'
y = np.asarray([0, 5, 1])
loss, grads = model.loss(X, y)
correct_loss = 3.4702243556
assert abs(loss - correct_loss) < 1e-10, 'Problem with training-time loss'
model.reg = 1.0
loss, grads = model.loss(X, y)
correct_loss = 26.5948426952
assert abs(loss - correct_loss) < 1e-10, 'Problem with regularization loss'
for reg in [0.0, 0.7]:
print 'Running numeric gradient check with reg = ', reg
model.reg = reg
loss, grads = model.loss(X, y)
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False)
print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))
print data.viewkeys()
data['X_train'].shape
model = TwoLayerNet()
solver = Solver(model, data, update_rule = 'sgd',
optim_config= {'learning_rate': 1e-3},lr_decay = 0.95,num_epochs = 10,
batch_size = 100,print_every = 1000)
##############################################################################
# TODO: Use a Solver instance to train a TwoLayerNet that achieves at least #
# 50% accuracy on the validation set. #
##############################################################################
solver.train()
##############################################################################
# END OF YOUR CODE #
##############################################################################
# Run this cell to visualize training loss and train / val accuracy
plt.subplot(2, 1, 1)
plt.title('Training loss')
plt.plot(solver.loss_history, 'o')
plt.xlabel('Iteration')
plt.subplot(2, 1, 2)
plt.title('Accuracy')
plt.plot(solver.train_acc_history, '-o', label='train')
plt.plot(solver.val_acc_history, '-o', label='val')
plt.plot([0.5] * len(solver.val_acc_history), 'k--')
plt.xlabel('Epoch')
plt.legend(loc='lower right')
plt.gcf().set_size_inches(15, 12)
plt.show()
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
print 'Running check with reg = ', reg
model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
reg=reg, weight_scale=5e-2, dtype=np.float64)
loss, grads = model.loss(X, y)
print 'Initial loss: ', loss
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))
# TODO: Use a three-layer Net to overfit 50 training examples.
num_train = 50
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
weight_scale = 1e-2
learning_rate = 1e-2 #changed here from 1e-4 to 1e-2
model = FullyConnectedNet([100, 100],
weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
print_every=10, num_epochs=20, batch_size=25,
update_rule='sgd',
optim_config={
'learning_rate': learning_rate,
}
)
solver.train()
plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()
# TODO: Use a five-layer Net to overfit 50 training examples.
num_train = 50
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
learning_rate = 1e-2
weight_scale = 1e-2
model = FullyConnectedNet([100, 100, 100, 100],
weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
print_every=10, num_epochs=20, batch_size=25,
update_rule='sgd',
optim_config={
'learning_rate': learning_rate,
}
)
solver.train()
plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()
print np.random.randn(1,2)
print np.random.uniform(-6,-1)
#from the code above which i have ommited for running now as it takes a lot of time
best_learning_rate,best_weight_scale = 0.0253234087295,0.0522855653841
# TODO: Use a five-layer Net to overfit 50 training examples.
num_train = 50
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
learning_rate = best_learning_rate
weight_scale = best_weight_scale
model = FullyConnectedNet([100, 100, 100, 100],
weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
print_every=10, num_epochs=20, batch_size=25,
update_rule='sgd',
optim_config={
'learning_rate': learning_rate,
}
)
solver.train()
plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()
from cs231n.optim import sgd_momentum
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
v = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-3, 'velocity': v}
next_w, _ = sgd_momentum(w, dw, config=config)
expected_next_w = np.asarray([
[ 0.1406, 0.20738947, 0.27417895, 0.34096842, 0.40775789],
[ 0.47454737, 0.54133684, 0.60812632, 0.67491579, 0.74170526],
[ 0.80849474, 0.87528421, 0.94207368, 1.00886316, 1.07565263],
[ 1.14244211, 1.20923158, 1.27602105, 1.34281053, 1.4096 ]])
expected_velocity = np.asarray([
[ 0.5406, 0.55475789, 0.56891579, 0.58307368, 0.59723158],
[ 0.61138947, 0.62554737, 0.63970526, 0.65386316, 0.66802105],
[ 0.68217895, 0.69633684, 0.71049474, 0.72465263, 0.73881053],
[ 0.75296842, 0.76712632, 0.78128421, 0.79544211, 0.8096 ]])
print 'next_w error: ', rel_error(next_w, expected_next_w)
print 'velocity error: ', rel_error(expected_velocity, config['velocity'])
num_train = 4000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
solvers = {}
for update_rule in ['sgd', 'sgd_momentum']:
print 'running with ', update_rule
model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)
solver = Solver(model, small_data,
num_epochs=5, batch_size=100,
update_rule=update_rule,
optim_config={
'learning_rate': 1e-2,
},
verbose=True)
solvers[update_rule] = solver
solver.train()
print
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
for update_rule, solver in solvers.iteritems():
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label=update_rule)
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label=update_rule)
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label=update_rule)
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
# Test RMSProp implementation; you should see errors less than 1e-7
from cs231n.optim import rmsprop
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
cache = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-2, 'cache': cache}
next_w, _ = rmsprop(w, dw, config=config)
expected_next_w = np.asarray([
[-0.39223849, -0.34037513, -0.28849239, -0.23659121, -0.18467247],
[-0.132737, -0.08078555, -0.02881884, 0.02316247, 0.07515774],
[ 0.12716641, 0.17918792, 0.23122175, 0.28326742, 0.33532447],
[ 0.38739248, 0.43947102, 0.49155973, 0.54365823, 0.59576619]])
expected_cache = np.asarray([
[ 0.5976, 0.6126277, 0.6277108, 0.64284931, 0.65804321],
[ 0.67329252, 0.68859723, 0.70395734, 0.71937285, 0.73484377],
[ 0.75037008, 0.7659518, 0.78158892, 0.79728144, 0.81302936],
[ 0.82883269, 0.84469141, 0.86060554, 0.87657507, 0.8926 ]])
print 'next_w error: ', rel_error(expected_next_w, next_w)
print 'cache error: ', rel_error(expected_cache, config['cache'])
# Test Adam implementation; you should see errors around 1e-7 or less
from cs231n.optim import adam
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
m = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
v = np.linspace(0.7, 0.5, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-2, 'm': m, 'v': v, 't': 5}
next_w, _ = adam(w, dw, config=config)
expected_next_w = np.asarray([
[-0.40094747, -0.34836187, -0.29577703, -0.24319299, -0.19060977],
[-0.1380274, -0.08544591, -0.03286534, 0.01971428, 0.0722929],
[ 0.1248705, 0.17744702, 0.23002243, 0.28259667, 0.33516969],
[ 0.38774145, 0.44031188, 0.49288093, 0.54544852, 0.59801459]])
expected_v = np.asarray([
[ 0.69966, 0.68908382, 0.67851319, 0.66794809, 0.65738853,],
[ 0.64683452, 0.63628604, 0.6257431, 0.61520571, 0.60467385,],
[ 0.59414753, 0.58362676, 0.57311152, 0.56260183, 0.55209767,],
[ 0.54159906, 0.53110598, 0.52061845, 0.51013645, 0.49966, ]])
expected_m = np.asarray([
[ 0.48, 0.49947368, 0.51894737, 0.53842105, 0.55789474],
[ 0.57736842, 0.59684211, 0.61631579, 0.63578947, 0.65526316],
[ 0.67473684, 0.69421053, 0.71368421, 0.73315789, 0.75263158],
[ 0.77210526, 0.79157895, 0.81105263, 0.83052632, 0.85 ]])
print 'next_w error: ', rel_error(expected_next_w, next_w)
print 'v error: ', rel_error(expected_v, config['v'])
print 'm error: ', rel_error(expected_m, config['m'])
learning_rates = {'rmsprop': 1e-4, 'adam': 1e-3}
for update_rule in ['adam', 'rmsprop']:
print 'running with ', update_rule
model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)
solver = Solver(model, small_data,
num_epochs=5, batch_size=100,
update_rule=update_rule,
optim_config={
'learning_rate': learning_rates[update_rule]
},
verbose=True)
solvers[update_rule] = solver
solver.train()
print
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
for update_rule, solver in solvers.iteritems():
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label=update_rule)
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label=update_rule)
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label=update_rule)
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
best_model = None
################################################################################
# TODO: Train the best FullyConnectedNet that you can on CIFAR-10. You might #
# batch normalization and dropout useful. Store your best model in the #
# best_model variable. #
################################################################################
pass
################################################################################
# END OF YOUR CODE #
################################################################################
y_test_pred = np.argmax(best_model.loss(X_test), axis=1)
y_val_pred = np.argmax(best_model.loss(X_val), axis=1)
print 'Validation set accuracy: ', (y_val_pred == y_val).mean()
print 'Test set accuracy: ', (y_test_pred == y_test).mean()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll load the text file and convert it into integers for our network to use.
Step3: Now I need to split up the data into batches, and into training and validation sets. I should be making a test set here, but I'm not going to worry about that. My test will be if the network can generate new text.
Step4: I'll write another function to grab batches out of the arrays made by split data. Here each batch will be a sliding window on these arrays with size batch_size X num_steps. For example, if we want our network to train on a sequence of 100 characters, num_steps = 100. For the next batch, we'll shift this window the next sequence of num_steps characters. In this way we can feed batches to the network and the cell states will continue through on each batch.
Step5: Hyperparameters
Step6: Write out the graph for TensorBoard
Step7: Training
Step8: Sampling
|
<ASSISTANT_TASK:>
Python Code:
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
chars[:100]
def split_data(chars, batch_size, num_steps, split_frac=0.9):
    """Split character data into training and validation sets, inputs and
    targets for each set.

    Arguments
    ---------
    chars: character array (1-D integer-encoded text)
    batch_size: Size of examples in each batch
    num_steps: Number of sequence steps to keep in the input and pass to the network
    split_frac: Fraction of batches to keep in the training set

    Returns train_x, train_y, val_x, val_y

    Note: requires len(chars) > n_batches * batch_size * num_steps so the
    target array (the input shifted one character left) has a full final
    element.
    """
    slice_size = batch_size * num_steps
    n_batches = int(len(chars) / slice_size)

    # Drop the last few characters to make only full batches; targets are
    # the inputs shifted one character to the left.
    x = chars[: n_batches*slice_size]
    y = chars[1: n_batches*slice_size + 1]

    # Split the data into batch_size slices, then stack them into a 2D matrix
    x = np.stack(np.split(x, batch_size))
    y = np.stack(np.split(y, batch_size))

    # Now x and y are arrays with dimensions batch_size x n_batches*num_steps

    # Split into training and validation sets, keep the first split_frac
    # batches for training
    split_idx = int(n_batches*split_frac)
    train_x, train_y = x[:, :split_idx*num_steps], y[:, :split_idx*num_steps]
    val_x, val_y = x[:, split_idx*num_steps:], y[:, split_idx*num_steps:]

    return train_x, train_y, val_x, val_y
train_x, train_y, val_x, val_y = split_data(chars, 10, 200)
train_x.shape
train_x[:,:10]
def get_batch(arrs, num_steps):
    """Yield successive ``batch_size x num_steps`` windows from each array.

    ``arrs`` is a list of 2-D arrays of identical shape (e.g. [inputs,
    targets]); each yielded item is the list of their next
    ``num_steps``-wide column slices, so cell state can be carried across
    consecutive batches.
    """
    batch_size, slice_size = arrs[0].shape
    n_batches = int(slice_size / num_steps)
    for b in range(n_batches):
        yield [x[:, b*num_steps: (b+1)*num_steps] for x in arrs]
def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2,
              learning_rate=0.001, grad_clip=5, sampling=False):
    """Build a character-level LSTM graph (TensorFlow 1.x API).

    Parameters
    ----------
    num_classes : int
        Vocabulary size (one-hot depth of inputs and targets).
    batch_size, num_steps : int
        Shape of each training batch; forced to 1x1 when ``sampling``.
    lstm_size, num_layers : int
        Width and depth of the stacked LSTM.
    learning_rate : float
        Adam learning rate.
    grad_clip : float
        Global-norm gradient clipping threshold.
    sampling : bool
        True when the graph is built for one-character-at-a-time sampling.

    Returns
    -------
    namedtuple
        The graph nodes needed by the training/sampling loops.
    """
    # Sampling feeds one character at a time, so the batch collapses to 1x1.
    if sampling:
        batch_size, num_steps = 1, 1

    tf.reset_default_graph()

    # Declare placeholders we'll feed into the graph
    with tf.name_scope('inputs'):
        inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
        x_one_hot = tf.one_hot(inputs, num_classes, name='x_one_hot')
    with tf.name_scope('targets'):
        targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
        y_one_hot = tf.one_hot(targets, num_classes, name='y_one_hot')
        y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    # Build the RNN layers (stacked LSTM cells with dropout on the outputs)
    with tf.name_scope("RNN_layers"):
        cell = tf.contrib.rnn.MultiRNNCell([
            tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.BasicLSTMCell(lstm_size), output_keep_prob=keep_prob) for _ in range(num_layers)
        ])
    with tf.name_scope("RNN_init_state"):
        initial_state = cell.zero_state(batch_size, tf.float32)

    # Run the data through the RNN layers
    with tf.name_scope("RNN_forward"):
        outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=initial_state)
    final_state = state

    # Reshape output so it's a bunch of rows, one row for each cell output
    with tf.name_scope('sequence_reshape'):
        seq_output = tf.concat(outputs, axis=1, name='seq_output')
        output = tf.reshape(seq_output, [-1, lstm_size], name='graph_output')

    # Now connect the RNN outputs to a softmax layer and calculate the cost
    with tf.name_scope('logits'):
        softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1),
                                name='softmax_w')
        softmax_b = tf.Variable(tf.zeros(num_classes), name='softmax_b')
        logits = tf.matmul(output, softmax_w) + softmax_b
    with tf.name_scope('predictions'):
        preds = tf.nn.softmax(logits, name='predictions')
    with tf.name_scope('cost'):
        loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped, name='loss')
        cost = tf.reduce_mean(loss, name='cost')

    # Optimizer for training, using gradient clipping to control exploding gradients
    with tf.name_scope('train'):
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)
        train_op = tf.train.AdamOptimizer(learning_rate)
        optimizer = train_op.apply_gradients(zip(grads, tvars))

    # Export the nodes the training/sampling loops need, bundled as a
    # namedtuple picked out of the local namespace by name.
    export_nodes = ['inputs', 'targets', 'initial_state', 'final_state',
                    'keep_prob', 'cost', 'preds', 'optimizer']
    Graph = namedtuple('Graph', export_nodes)
    local_dict = locals()
    graph = Graph(*[local_dict[each] for each in export_nodes])
    return graph
batch_size = 100
num_steps = 100
lstm_size = 512
num_layers = 2
learning_rate = 0.001
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
file_writer = tf.summary.FileWriter('./logs/3', sess.graph)
!mkdir -p checkpoints/anna
epochs = 10
save_every_n = 200
train_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps)
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/anna20.ckpt')
n_batches = int(train_x.shape[1]/num_steps)
iterations = n_batches * epochs
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):
iteration = e*n_batches + b
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 0.5,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.cost, model.final_state, model.optimizer],
feed_dict=feed)
loss += batch_loss
end = time.time()
print('Epoch {}/{} '.format(e+1, epochs),
'Iteration {}/{}'.format(iteration, iterations),
'Training loss: {:.4f}'.format(loss/b),
'{:.4f} sec/batch'.format((end-start)))
if (iteration%save_every_n == 0) or (iteration == iterations):
# Check performance, notice dropout has been set to 1
val_loss = []
new_state = sess.run(model.initial_state)
for x, y in get_batch([val_x, val_y], num_steps):
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 1.,
model.initial_state: new_state}
batch_loss, new_state = sess.run([model.cost, model.final_state], feed_dict=feed)
val_loss.append(batch_loss)
print('Validation loss:', np.mean(val_loss),
'Saving checkpoint!')
saver.save(sess, "checkpoints/anna/i{}_l{}_{:.3f}.ckpt".format(iteration, lstm_size, np.mean(val_loss)))
tf.train.get_checkpoint_state('checkpoints/anna')
def pick_top_n(preds, vocab_size, top_n=5):
    """Sample one class index from the ``top_n`` most probable predictions.

    Bug fix: the original zeroed entries of ``preds`` in place
    (``np.squeeze`` returns a view), silently mutating the caller's
    array; we now copy before modifying.
    """
    p = np.squeeze(preds).copy()
    # Zero everything except the top_n most likely entries, then
    # renormalize so the remaining probabilities sum to 1.
    p[np.argsort(p)[:-top_n]] = 0
    p = p / np.sum(p)
    c = np.random.choice(vocab_size, 1, p=p)[0]
    return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
    """Generate ``n_samples`` characters from a trained checkpoint.

    Bug fix: the original body immediately re-assigned ``prime = "Far"``,
    which silently ignored the caller's ``prime`` argument; that override
    has been removed.

    Relies on the module-level ``vocab_to_int`` / ``int_to_vocab`` /
    ``vocab`` lookups and on ``build_rnn`` / ``pick_top_n`` defined in
    this notebook.
    """
    samples = [c for c in prime]
    model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)
        # Feed the priming characters through the network one at a time
        # to warm up the LSTM state.
        for c in prime:
            x = np.zeros((1, 1))
            x[0, 0] = vocab_to_int[c]
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.preds, model.final_state],
                                        feed_dict=feed)
        c = pick_top_n(preds, len(vocab))
        samples.append(int_to_vocab[c])
        # Generate new characters, feeding each prediction back in.
        for i in range(n_samples):
            x[0, 0] = c
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.preds, model.final_state],
                                        feed_dict=feed)
            c = pick_top_n(preds, len(vocab))
            samples.append(int_to_vocab[c])
    return ''.join(samples)
checkpoint = "checkpoints/anna/i3560_l512_1.122.ckpt"
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i200_l512_2.432.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i600_l512_1.750.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i1000_l512_1.484.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Packages Imported
Step2: Number of Home Runs by Year
Step3: Number of Stolen Bases by Year
Step4: Number of Doubles by Year
Step5: Number of Hits by Year
Step6: Stacked Bar Graph of Hits
Step7: % of Home Runs
|
<ASSISTANT_TASK:>
Python Code:
import sys # system module
import pandas as pd # data package
import matplotlib.pyplot as plt # graphics module
import datetime as dt # date and time module
import numpy as np # foundation for Pandas
%matplotlib inline
# check versions
print('Python version:', sys.version)
print('Pandas version: ', pd.__version__)
print('Today: ', dt.date.today())
url = 'https://raw.githubusercontent.com/maxtoki/baseball_R/master/data/Batting.csv' # importing our data
bs = pd.read_csv(url)
bs.head() # taking a look at the data
print('Dimensions: ', bs.shape) # looking at the categories I can work with
print('Column labels: ', bs.columns)
print('Row labels: ', bs.index)
names = list(bs) # Changing the year label
bs = bs.rename(columns={names[1]: 'Year'})
bs.head(2)
bsmall = bs.head() #inspecting the data
bsmall
bsmall.shape
bsmall.describe # inspecting the data
hrs_by_year = bs.groupby(['Year'])['HR'].sum() # grouping data by year and hrs
hrs_by_year[2004] # check a data point
hrs_by_year.tail(25)
plt.figure(figsize = (16,7))
plt.plot(hrs_by_year.tail(25), color = 'red', marker= '*')
plt.suptitle('Total Home Runs by Year', fontsize=18)
plt.xlabel('Year', fontsize=12)
plt.ylabel('Home Runs', fontsize=12)
sb_by_year = bs.groupby(['Year'])['SB'].sum() # grouping data by year and stolen bases
sb_by_year.tail(25)
plt.figure(figsize = (16,7)) # plotting the data
plt.plot(sb_by_year.tail(25), color = 'blue', marker= '*')
plt.suptitle('Stolen Bases by Year', fontsize=18)
plt.xlabel('Year', fontsize=12)
plt.ylabel('Stolen Bases', fontsize=12)
scndbase_by_year = bs.groupby(['Year'])['2B'].sum() # grouping data by year and doubles
scndbase_by_year.tail(25)
plt.figure(figsize = (16,7)) # plotting our data
plt.plot(scndbase_by_year.tail(25), color = 'green', marker= '*')
plt.suptitle('Doubles by Year', fontsize=18)
plt.xlabel('Year', fontsize=12)
plt.ylabel('Doubles', fontsize=12)
hits_by_year = bs.groupby(['Year'])['H'].sum() # grouping data by year and hits
hits_by_year.tail(25)
plt.figure(figsize = (16,7)) # plotting our data
plt.plot(hits_by_year.tail(25), color = 'blue', marker= '*',)
plt.suptitle('Hits by Year', fontsize=18)
plt.xlabel('Year', fontsize=12)
plt.ylabel('Hits', fontsize=12)
df = bs.groupby(['Year'])['2B','HR','H'].sum() #looking at the homeruns and doubles as a % of total hits
df = df.tail(25)
df['Other'] = df['H'] - df['HR'] - df['2B']
df['% HR'] = df['HR'] / df['H']
df['% 2B'] = df['2B'] / df['H']
df['% Other'] = df['Other'] / df['H']
df = df.drop ('2B', 1)
df = df.drop ('HR', 1)
df = df.drop ('H', 1)
df = df.drop ('Other', 1)
df
my_plot = df.plot(kind='bar',stacked=True,figsize=(16,7), fontsize = (14)) # creating a stacked bar graph
my_plot.set_title("Hits by Category", fontsize = (16))
my_plot.set_xlabel("Year", fontsize = (14))
my_plot.set_ylabel("Hits", fontsize = (14))
my_plot.legend(["2B","HR","Other"], loc=9,ncol=3)
df2 = df.plot(kind='barh', figsize=(16,7), fontsize = (14), y = '% HR', title = '% of Hits HRs') # ploting hrs as a % of total hits
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Interact with SVG display
Step5: Write a function named draw_circle that draws a circle using SVG. Your function should take the parameters of the circle as function arguments and have defaults as shown. You will have to write the raw SVG code as a Python string and then use the IPython.display.SVG object and IPython.display.display function.
Step6: Use interactive to build a user interface for exploing the draw_circle function
Step7: Use the display function to show the widgets created by interactive
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import (display, SVG)
from IPython.html.widgets import interact, interactive, fixed
from IPython.html import widgets
s =
<svg width="100" height="100">
<circle cx="50" cy="50" r="20" fill="aquamarine" />
</svg>
SVG(s)
def draw_circle(width=100, height=100, cx=25, cy=25, r=5, fill='red'):
    """Draw an SVG circle and display it inline.

    Parameters
    ----------
    width : int
        The width of the svg drawing area in px.
    height : int
        The height of the svg drawing area in px.
    cx : int
        The x position of the center of the circle in px.
    cy : int
        The y position of the center of the circle in px.
    r : int
        The radius of the circle in px.
    fill : str
        The fill color of the circle.
    """
    # Build the raw SVG markup, then hand it to IPython for rendering
    # (the string-literal quoting was lost in the dump and is restored here).
    c = ('<svg width="%s" height="%s"> '
         '<circle cx="%s" cy="%s" r="%s" fill="%s" /> '
         '</svg>') % (width, height, cx, cy, r, fill)
    display(SVG(c))
draw_circle(cx=10, cy=10, r=10, fill='blue')
assert True # leave this to grade the draw_circle function
w = interactive(draw_circle, width=fixed(300), height=fixed(300), cx=(0,300,10), cy=(0,300,10), r=(0,50), fill='red')
c = w.children
assert c[0].min==0 and c[0].max==300
assert c[1].min==0 and c[1].max==300
assert c[2].min==0 and c[2].max==50
assert c[3].value=='red'
w
assert True # leave this to grade the display of the widget
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1
Step2: B
Step3: By deleting 2 of the 500 datapoints, the projection on the PCs change about 10 degrees. Hence the PCs itself change significantly.
Step4: The data points at indices 99, 111, 199 and 211 (marked in red in the plot above) are apparently outliers and will be discarded.
Step5: B
Step6: The first two PCs seem sufficient to represent the data. The third and fourth PCs are negligible.
Step7: C
Step8: D
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
data1 = pd.read_csv('pca2.csv', sep=',')
data1.ix[0:2, 'X1':'X2']
data1.ix[0:2, :]
data1.shape
m = np.mean(data1, 0)
data1_centered = data1 - m
data1_centered.ix[0:2, :]
covariance1 = np.cov(data1_centered.T)
evals1, evecs1 = np.linalg.eig(covariance1)
transmat1 = evecs1.T
evec1 = transmat1[0]
evec2 = transmat1[1]
data1_trans = np.array([[0.0, 0.0] for i in range(len(data1))])
for i in range(len(data1)):
data1_trans[i] = np.dot(transmat1, data1_centered.ix[i, :])
data1_trans[0:3, :]
# plt.figure(figsize=(10, 10))
fig, ax = plt.subplots(figsize=(10, 10))
plt.scatter(data1_centered.ix[:, 'X1'], data1_centered.ix[:, 'X2'])
ax.set_color_cycle(['black', 'red'])
plt.plot([0, evec1[0]], [0, evec1[1]])
plt.plot([0, evec2[0]], [0, evec2[1]])
plt.grid()
plt.show()
plt.figure(figsize=(10, 10))
plt.scatter(data1_trans.T[0], data1_trans.T[1])
plt.grid()
plt.show()
data1b_c = np.copy(data1_centered)
data1b_c[16, :] = 0
data1b_c[156, :] = 0
cov1b = np.cov(data1b_c.T)
evals1b, evecs1b = np.linalg.eig(cov1b)
transmat1b = evecs1b.T
evec1b = transmat1b[0]
evec2b = transmat1b[1]
data1b_t = np.array([[0.0, 0.0] for i in range(len(data1))])
for i in range(len(data1)):
data1b_t[i] = np.dot(transmat1b, data1b_c[i, :])
plt.figure(figsize=(10, 10))
plt.scatter(data1_trans.T[0], data1_trans.T[1])
plt.scatter(data1b_t.T[0], data1b_t.T[1])
plt.grid()
plt.show()
data = np.loadtxt('pca4.csv', skiprows=1, delimiter=',')
data.shape, data[:10]
fig, axes = plt.subplots(2, 2, figsize=(10, 10))
outliers = [99, 199, 111, 211]
for i, ax in enumerate(axes.flatten()):
plt.sca(ax)
plt.scatter(range(len(data)), data[:, i], c=['r' if a in outliers else 'b' for a in range(len(data))])
plt.ylabel('x{}'.format(i+1))
plt.xlabel('Index')
plt.ylim(-14, 14)
filtered_data = np.delete(data, outliers, axis=0)
filtered_data.shape
centered_data = filtered_data - filtered_data.mean(axis=0)
centered_data[:10]
cov = np.cov(centered_data.T)
cov.shape
evals, evecs = np.linalg.eig(cov)
plt.plot(evals, 'o')
plt.ylabel('Eigenvalue')
plt.xlabel('Component')
plt.ylim(0)
evals
projected_data = np.dot(evecs.T, centered_data.T).T
fig, axes = plt.subplots(2, 2, figsize=(10, 10))
for i, ax in enumerate(axes.flatten()):
plt.sca(ax)
plt.plot(projected_data[:, i], '.')
plt.ylabel('u{}'.format(i+1))
plt.xlabel('Index')
plt.ylim(-6, 6)
whitened_data = evecs.T.dot(centered_data.T).T.dot(np.diag(1 / np.sqrt(evals)))
fig, axes = plt.subplots(2, 2, figsize=(10, 10))
for i, ax in enumerate(axes.flatten()):
plt.sca(ax)
plt.plot(whitened_data[:, i], '.')
plt.ylabel('w{}'.format(i+1))
plt.xlabel('Index')
plt.ylim(-6, 6)
plt.imshow(cov, cmap='Blues')
plt.colorbar(label='Covariance of original data')
plt.xlabel('Dimension')
plt.ylabel('Dimension')
projected_cov = np.cov(projected_data.T)
plt.imshow(projected_cov, cmap='Blues')
plt.colorbar(label='Covariance of projected data')
plt.xlabel('Dimension')
plt.ylabel('Dimension')
whitened_cov = np.cov(whitened_data.T)
plt.imshow(whitened_cov, cmap='Blues')
plt.colorbar(label='Covariance of whitened data')
plt.xlabel('Dimension')
plt.ylabel('Dimension')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using Gaussian prior for k1
Step2: Using mixed priors and estimate both k1 and d1
Step3: Check mcmc_results.csv for the results of the MCMC procedure and perform your own analysis.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = "retina"
from matplotlib import rcParams
rcParams["savefig.dpi"] = 100
rcParams["figure.dpi"] = 100
rcParams["font.size"] = 20
%matplotlib inline
import bioscrape as bs
from bioscrape.types import Model
from bioscrape.inference import py_inference
import numpy as np
import pylab as plt
import pandas as pd
# Import a bioscrape/SBML model
M = Model(sbml_filename = 'toy_sbml_model.xml')
# Import data from CSV
# Import a CSV file for each experiment run
df = pd.read_csv('test_data.csv', delimiter = '\t', names = ['X','time'], skiprows = 1)
M.set_species({'X':df['X'][0]})
# Create prior for parameters
prior = {'d1' : ['gaussian', 0.2, 200]}
sampler, pid = py_inference(Model = M, exp_data = df, measurements = ['X'], time_column = ['time'],
nwalkers = 5, init_seed = 0.15, nsteps = 1500, sim_type = 'deterministic',
params_to_estimate = ['d1'], prior = prior)
%matplotlib inline
import bioscrape as bs
from bioscrape.types import Model
from bioscrape.inference import py_inference
import numpy as np
import pylab as plt
import pandas as pd
# Import a bioscrape/SBML model
M = Model(sbml_filename = 'toy_sbml_model.xml')
# Import data from CSV
# Import a CSV file for each experiment run
df = pd.read_csv('test_data.csv', delimiter = '\t', names = ['X','time'], skiprows = 1)
M.set_species({'X':df['X'][0]})
prior = {'d1' : ['gaussian', 0.2, 20], 'k1' : ['uniform', 0, 100]}
sampler, pid = py_inference(Model = M, exp_data = df, measurements = ['X'], time_column = ['time'],
nwalkers = 20, init_seed = 0.15, nsteps = 5500, sim_type = 'deterministic',
params_to_estimate = ['d1', 'k1'], prior = prior)
from bioscrape.simulator import py_simulate_model
M_fit = Model(sbml_filename = 'toy_sbml_model.xml')
M_fit.set_species({'X':df['X'][0]})
timepoints = pid.timepoints
flat_samples = sampler.get_chain(discard=200, thin=15, flat=True)
inds = np.random.randint(len(flat_samples), size=200)
for ind in inds:
sample = flat_samples[ind]
for pi, pi_val in zip(pid.params_to_estimate, sample):
M_fit.set_parameter(pi, pi_val)
plt.plot(timepoints, py_simulate_model(timepoints, Model= M_fit)['X'], "C1", alpha=0.1)
# plt.errorbar(, y, yerr=yerr, fmt=".k", capsize=0)
# plt.plot(timepoints, list(pid.exp_data['X']), label = 'data')
plt.plot(timepoints, py_simulate_model(timepoints, Model = M)['X'], "k", label="original model")
plt.legend(fontsize=14)
plt.xlabel("Time")
plt.ylabel("[X]");
flat_samples = sampler.get_chain(discard = 200, thin = 15,flat = True)
flat_samples
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Data Sampling
Step3: Smokers and Nonsmokers
Step4: The histogram below displays the distribution of birth weights of the babies of the non-smokers and smokers in the sample.
Step5: Both distributions are approximately bell shaped and centered near 120 ounces. The distributions are not identical, of course, which raises the question of whether the difference reflects just chance variation or a difference in the distributions in the population.
Step8: The P-value is very, very small. As a result, we can reject the null hypothesis and conclude that in the population, the distributions of birth weights for babies of mothers who smoke and of those who don't smoke are different.
Step9: This bootstrapped confidence interval tells us that, on average, non-smoking mothers had babies that weighed between 5.8 and 12.8 ounces more than those of their smoking counterparts. Furthermore, because 0 is not contained in the confidence interval for the difference in the means, we can conclude that the two distributions are different.
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
# Import libraries
from __future__ import absolute_import, division, print_function
# Ignore warnings
import warnings
#warnings.filterwarnings('ignore')
import sys
sys.path.append('tools/')
import numpy as np
import pandas as pd
import scipy.stats as st
# Graphing Libraries
import matplotlib.pyplot as pyplt
import seaborn as sns
sns.set_style("white")
# Configure for presentation
np.set_printoptions(threshold=50, linewidth=50)
import matplotlib as mpl
mpl.rc('font', size=16)
from IPython.display import display
def axis_tick_frequency(ax, axis, freq):
    """Set the tick-mark frequency on one axis of a matplotlib Axes.

    Attributes
    ----------
    ax: matplotlib axis object
    axis: str, either 'y' or 'x' — selects which axis to re-tick
    freq: int, spacing between consecutive tick marks (data units)

    Raises
    ------
    ValueError
        If ``axis`` is neither 'y' nor 'x'.
    """
    if axis == 'y':
        start, end = ax.get_ylim()
        ax.yaxis.set_ticks(np.arange(start, end, freq))
    elif axis == 'x':
        start, end = ax.get_xlim()
        ax.xaxis.set_ticks(np.arange(start, end, freq))
    else:
        raise ValueError('{argument} is not a valid axis object'.format(argument=repr(axis)))
def sample(num_sample, top, with_replacement=False):
    """Draw a random sample of row positions from a table.

    Attributes
    ---------
    num_sample: int, number of indices to draw
    top: dataframe (only its length is used)
    with_replacement: boolean — if True the same position may be drawn
        more than once

    Returns a random subset of the table's positional index (list of int).

    Note: without replacement, ``num_sample`` must not exceed
    ``len(top)``; otherwise ``np.random.choice`` is eventually called on
    an empty pool and raises.
    """
    df_index = []
    pool = np.arange(0, len(top), 1)
    for _ in np.arange(0, num_sample, 1):
        # pick uniformly from the remaining pool
        chosen = np.random.choice(pool)
        if not with_replacement:
            # remove the choice that was selected so it cannot recur
            pool = np.setdiff1d(pool, [chosen])
        df_index.append(chosen)
    return df_index
baby_df = pd.read_csv('data/baby.csv')
baby_df.head()
weight_smoke = baby_df[['Birth Weight', 'Maternal Smoker']]
weight_smoke['Maternal Smoker'].value_counts()
smoker = baby_df['Maternal Smoker'] == True
non_smoker = baby_df['Maternal Smoker'] == False
df_non_smoker = baby_df.ix[baby_df[non_smoker].index, :]
df_non_smoker.columns = [u'Non Smoker Birth Weight', u'Gestational Days', u'Maternal Age',
u'Maternal Height', u'Maternal Pregnancy Weight', u'Maternal Smoker']
df_smoker = baby_df.ix[baby_df[smoker].index, :]
df_smoker.columns = [u'Smoker Birth Weight', u'Gestational Days', u'Maternal Age',
u'Maternal Height', u'Maternal Pregnancy Weight', u'Maternal Smoker']
df_non_smoker['Non Smoker Birth Weight'].plot.hist(bins=np.arange(40, 186, 5), normed=True, alpha = 0.8)
df_smoker['Smoker Birth Weight'].plot.hist(bins=np.arange(40, 186, 5), normed=True, alpha = 0.8)
pyplt.ylabel("percent per ounce")
pyplt.xlabel("Birth Weight (ounce)")
pyplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.);
a = df_non_smoker['Non Smoker Birth Weight'].values
b = df_smoker['Smoker Birth Weight'].values
# difference in the means
a.mean() - b.mean()
raw = {
'Maternal Smoker': [False, True],
'Birth Weight mean': [123.085, 113.819]
}
means_table = pd.DataFrame(raw)
means_table
statistic, pvalue = st.ttest_ind(a, b)
print ('T statistic: %.2f'%statistic,'\nP-value:%.2f'%pvalue)
import scikits.bootstrap as bootstrap
import scipy
# compute 95% confidence intervals around the mean
CIs = bootstrap.ci(baby_df[['Birth Weight', 'Maternal Smoker']], scipy.mean)
print ("Bootstrapped 95% confidence interval around the mean\nLow:", CIs[0], "\nHigh:", CIs[1])
# bootstrap 5000 samples instead of only 1174
CIs = bootstrap.ci(baby_df[['Birth Weight', 'Maternal Smoker']], scipy.mean, n_samples=5000)
print ("Bootstrapped 95% confidence interval with 5,000 samples\nLow:", CIs[0], "\nHigh:", CIs[1])
def get_means(df, variable, classes):
    """Mean of ``variable`` for each value of the boolean column ``classes``.

    Attributes
    -------------
    df: a pandas dataframe
    variable: str, name of the numeric column to average
    classes: str, name of a boolean column splitting rows into two groups

    Returns
    -------
    pandas.DataFrame with two rows (classes == False first, then True)
    holding the group means; columns are [classes, variable].
    """
    # Boolean masks for the two groups. The removed pandas `.ix` indexer
    # has been replaced with `.loc`, its supported equivalent.
    in_class = df[classes] == True
    not_in_class = df[classes] == False
    mean_false = df.loc[not_in_class, variable].values.mean()
    mean_true = df.loc[in_class, variable].values.mean()
    raw = {
        classes: [False, True],
        variable: [mean_false, mean_true],
    }
    means_table = pd.DataFrame(raw)
    return means_table
def bootstrap_ci_means(table, variable, classes, repetitions):
    """Bootstrap approximate 95% confidence interval for the difference
    between the means of the two classes in the population.

    Attributes
    -------------
    table: a pandas dataframe
    variable: str, numeric column whose group means are compared
    classes: str, boolean column splitting rows into two groups
    repetitions: int, number of bootstrap resamples

    Side effects: plots a histogram of the bootstrap distribution with the
    CI overlaid, and prints the observed statistic and the interval.
    Relies on the module-level helpers ``sample`` and ``get_means`` and on
    the bare ``plot`` name injected by ``%pylab inline``.
    """
    t = table[[variable, classes]]
    mean_diffs = []
    for _ in np.arange(repetitions):
        # Resample rows with replacement; `.iloc` replaces the removed
        # `.ix` indexer (sample() returns positional indices).
        bootstrap_sampl = table.iloc[sample(len(table), table, with_replacement=True), :]
        m_tbl = get_means(bootstrap_sampl, variable, classes)
        new_stat = m_tbl.loc[0, variable] - m_tbl.loc[1, variable]
        mean_diffs = np.append(mean_diffs, new_stat)
    left = np.percentile(mean_diffs, 2.5)
    right = np.percentile(mean_diffs, 97.5)
    # Find the observed test statistic on the original table
    means_table = get_means(t, variable, classes)
    obs_stat = means_table.loc[0, variable] - means_table.loc[1, variable]
    df = pd.DataFrame()
    df['Difference Between Means'] = mean_diffs
    # NOTE(review): matplotlib 3.x removed 'normed'; use density=True there.
    df.plot.hist(bins=20, normed=True)
    plot([left, right], [0, 0], color='yellow', lw=8)
    print('Observed difference between means:', obs_stat)
    print('Approximate 95% CI for the difference between means:')
    print(left, 'to', right)
bootstrap_ci_means(baby_df, 'Maternal Age', 'Maternal Smoker', 5000)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This first line is only necessary for ipython noteboooks - it allows the plots to be shown on this page instead of in interactive mode
Step2: As always, let's do imports and initialize a logger and a new Bundle. See Building a System for more details.
Step3: And we'll attach some dummy datasets. See Datasets for more details.
Step4: And run the forward models. See Computing Observables for more details.
Step5: Showing and Saving
Step6: Any call to plot returns 2 lists - a list of the axes and a list of the artists that were drawn on those axes. Generally we won't need to do anything with these, but having them returned could come in handy if you want to manually edit those axes or artists before saving the image.
Step7: Time (highlight and uncover)
Step8: To change the style of the "highlighted" points, you can pass matplotlib recognized markers, colors, and markersizes to the highlight_marker, highlight_color, and highlight_ms keywords, respectively.
Step9: To disable highlighting, simply send highlight=False
Step10: Uncover
Step11: Selecting Datasets
Step12: An advantage to this last approach (providing a twig as a positional argument to the plot method) is that it can accept multiple positional arguments to plot from multiple datasets in a single call.
Step13: If the datasets have multiple dataset kinds, subplots will automatically be created.
Step14: Later we'll see how to customize the layout of these subplots in the figure and how to pass other plotting options.
Step15: To see the list of available qualifiers that could be passed for x or y, call the qualifiers (or twigs) property on the ParameterSet.
Step16: For more information on each of the available arrays, see the relevant tutorial on that dataset method
Step17: Units
Step18: WARNING
Step19: Axes Limits
Step20: Errorbars
Step21: To disable the errorbars, simply set yerrors=None.
Step22: Colors
Step23: In addition, you can point to an array in the dataset to use as color.
Step24: Choosing colors works slightly differently for meshes (ie you can set facecolor and edgecolor and facecmap and edgecmap). For more details, see the tutorial on the mesh dataset.
Step25: Labels and Legends
Step26: The legend labels are generated automatically, but can be overriden by passing a string to the label keyword.
Step27: Other Plotting Options
Step28: Custom Subplots
Step29: Alternatively, this can be done in a single call to plot by passing dictionaries as positional arguments. Each dictionary, in essence, is passed on to its own plot call.
Step30: Note that now when passing additional arguments, those will apply as defaults to EACH of the dictionaries, but will not override any values explicitly provided in the dictionaries.
Step31: 3D Axes
|
<ASSISTANT_TASK:>
Python Code:
!pip install -I "phoebe>=2.0,<2.1"
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
# Enable PHOEBE's default logger so compute progress and warnings are printed.
logger = phoebe.logger()
# Start from PHOEBE's default detached binary and adjust a few parameters:
# mass ratio q = M2/M1, eccentricity, and irradiation disabled for speed.
b = phoebe.default_binary()
b['q'] = 0.8
b['ecc'] = 0.1
b['irrad_method'] = 'none'
# Synthetic orbit dataset: 1000 samples over times 0-4 (PHOEBE's default
# time units) for both components.
b.add_dataset('orb', times=np.linspace(0,4,1000), dataset='orb01', component=['primary', 'secondary'])
# Observed light curve (times, fluxes, uncertainties) loaded from disk.
times, fluxes, sigmas = np.loadtxt('test.lc.in', unpack=True)
b.add_dataset('lc', times=times, fluxes=fluxes, sigmas=sigmas, dataset='lc01')
# Forward-model the system at three inclinations; each run is stored in the
# bundle under its own model tag for later comparison.
b.set_value('incl@orbit', 90)
b.run_compute(model='run_with_incl_90')
b.set_value('incl@orbit', 85)
b.run_compute(model='run_with_incl_85')
b.set_value('incl@orbit', 80)
b.run_compute(model='run_with_incl_80')
# Plot everything in the bundle (all datasets and all models at once).
axs, artists = b.plot()
# Filter with a twig to plot only the orbits of one model.
axs, artists = b['orb@run_with_incl_80'].plot()
# time=... highlights the positions at that time on the drawn orbits.
axs, artists = b['orb@run_with_incl_80'].plot(time=1.0)
# Highlight styling: matplotlib marker, color, and markersize.
axs, artists = b['orb@run_with_incl_80'].plot(time=1.0, highlight_marker='s', highlight_color='g', highlight_ms=20)
axs, artists = b['orb@run_with_incl_80'].plot(time=1.0, highlight=False)
# uncover=True draws the orbit only up to the given time.
axs, artists = b['orb@run_with_incl_80'].plot(time=1.0, uncover=True)
# Equivalent ways of selecting a single component's orbit from one model.
axs, artists = b['primary@orb@run_with_incl_80'].plot()
axs, artists = b.plot(component='primary', kind='orb', model='run_with_incl_80')
axs, artists = b.plot('primary@orb@run_with_incl_80')
# Multiple positional twigs plot several selections in one call.
axs, artists = b.plot('primary@orb@run_with_incl_80', 'secondary@orb@run_with_incl_80')
# Mixing dataset kinds (orb + lc) automatically creates subplots.
axs, artists = b['run_with_incl_80'].plot('primary@orb', 'lc01')
# x/y can name any qualifier in the dataset (here times vs x-velocity).
axs, artists = b['orb01@primary@run_with_incl_80'].plot(x='times', y='vxs')
# List the qualifiers (arrays) available for plotting in this ParameterSet.
b['orb01@primary@run_with_incl_80'].qualifiers
axs, artists = b['lc01@dataset'].plot(x='phases', yerrors=None)
# Axis units, labels, and limits can be overridden.
axs, artists = b['orb01@primary@run_with_incl_80'].plot(xunit='AU', yunit='AU')
axs, artists = b['orb01@primary@run_with_incl_80'].plot(xlabel='X POS', ylabel='Z POS')
axs, artists = b['orb01@primary@run_with_incl_80'].plot(xlim=(-2,2))
# Errorbars: point yerrors at the sigmas array, or None to disable them.
axs, artists = b['lc01@dataset'].plot(yerrors='sigmas')
axs, artists = b['lc01@dataset'].plot(yerrors=None)
# color accepts a matplotlib color or the name of an array in the dataset,
# with an optional colormap.
axs, artists = b['orb01@primary@run_with_incl_80'].plot(color='r')
axs, artists = b['orb01@primary@run_with_incl_80'].plot(time=1.0, x='times', color='vzs')
axs, artists = b['orb01@primary@run_with_incl_80'].plot(time=1.0, x='times', color='vzs', cmap='spring')
# Legends: labels are generated automatically but can be overridden per call.
axs, artists = b['orb@run_with_incl_80'].plot()
legend = plt.legend()
axs, artists = b['primary@orb@run_with_incl_80'].plot(label='primary')
axs, artists = b['secondary@orb@run_with_incl_80'].plot(label='secondary')
legend = axs[0].legend()
# Other matplotlib keyword arguments pass straight through.
axs, artists = b['orb01@primary@run_with_incl_80'].plot(linestyle=':', linewidth=4)
# Custom subplot layout: create the axes first, then target them with ax=.
fig = plt.figure(figsize=(14,10))
ax = [fig.add_subplot(2,2,i+1) for i in range(4)]
axs, artists = b.plot('orb01@primary', y='ys', ax=ax[0])
ax[0].legend()
axs, artists = b['orb01@run_with_incl_80'].plot(y='ys', linestyle='--', time=5, uncover=True, ax=ax[1])
ax[1].legend()
axs, artists = b.plot(dataset='orb01', y='ys', ax=ax[2])
ax[2].legend()
axs, artists = b.plot(dataset='orb01', model='run_with_incl_80', x='times', y='vys', time=5, uncover=True, ax=ax[3])
ax[3].legend()
# The same four panels in a single plot() call: one dict per sub-plot.
fig = plt.figure(figsize=(14,10))
ax = [fig.add_subplot(2,2,i+1) for i in range(4)]
plot1 = {'twig': 'orb01@primary', 'y': 'ys', 'ax':ax[0]}
plot2 = {'twig': 'orb01@run_with_incl_80', 'y': 'ys', 'linestyle': '--', 'time': 5, 'uncover': True, 'ax':ax[1]}
plot3 = {'twig': 'orb01', 'y': 'ys', 'ax': ax[2]}
plot4 = {'dataset': 'orb01', 'model': 'run_with_incl_80', 'x': 'times', 'y': 'vys', 'time': 5, 'uncover': True, 'ax': ax[3]}
axs, artists = b.plot(plot1, plot2, plot3, plot4)
for axi in ax:
    axi.legend()
# Extra keyword arguments act as defaults for every dict, but do not
# override values given explicitly inside the dicts.
fig = plt.figure(figsize=(14,10))
ax = [fig.add_subplot(2,2,i+1) for i in range(4)]
plot1 = {'twig': 'orb01@primary', 'y': 'ys', 'ax':ax[0]}
plot2 = {'twig': 'orb01@run_with_incl_80', 'y': 'ys', 'linestyle': '--', 'time': 5, 'uncover': True, 'ax':ax[1]}
plot3 = {'twig': 'orb01', 'y': 'ys', 'ax': ax[2]}
plot4 = {'dataset': 'orb01', 'model': 'run_with_incl_80', 'x': 'times', 'y': 'vys', 'time': 5, 'uncover': True, 'ax': ax[3]}
axs, artists = b.plot(plot1, plot2, plot3, plot4, x='xs', y='zs', color='r')
for axi in ax:
    axi.legend()
# 3D axes: pass a matplotlib 3d-projection axis via ax=.
figure = plt.figure()
ax = figure.add_subplot(111, projection='3d')
axes, artists = b['orb@run_with_incl_80'].plot(time=0, facecolor='teffs', edgecolor=None, ax=ax)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Descriptive statistics
Step6: Plot the distribution of money involved per transaction. There are ~900 cases where no money was exchanged, but an alert was still triggered. These are potential bad data points, and might need to be removed from the sample. I'd consult with other members of the team to determine whether that would be appropriate. The distribution of money spent is roughly log-normal.
Step8: No - every case in the cases table is associated with at least one rule triggering an alert.
Step9: Defining metrics
Step11: I have a label that predicts both a specific rule and its associated class for each transaction. So a reasonable ordered set of priorities might be
Step12: So the distribution of outcomes is very different depending on the overall rule type. Let's look at the actual numbers in each category.
Step14: This data splits the number of alerts by the category of the triggering rule and the ultimate outcome. In every category, the most common outcome is that funds were not withheld and there was no corresponding loss. However, the ratio of outcomes varies strongly by rule type. For rules on compliance, more than 80% of cases are benign and flagged as such. The benign fraction drops to 61% for financial risk and 56% for fraud. So the type of rule being broken is strongly correlated with the likelihood of a bad transaction.
Step15: This is one of the initial plots in the mock dashboard. It shows the overall performance of each rule sorted by outcome. Rule 17 stands out because it has only a single triggered alert in the dataset (agent placed funds on hold, but there was no fraud involved - false negative).
Step16: This is a good overall summary; we have three metrics for each rule, of which the combined F1 is considered to be the most important. For any rule, we can look at the corresponding plot in the dashboard and examine whether F1 is above a chosen threshold value (labeled here as 0.5). Reading from left to right in the top row, for example, Rule 1 is performing well, Rule 2 is acceptable, Rules 3-5 are performing below the desired accuracy, etc.
Step17: Financial risk rules are the largest category, and are mostly cases that were true negatives (money not held and it wasn't a bad transaction). The false negative rate is slightly larger than the true positive, though, indicating that financial risks are missing more than half of the genuinely bad transactions. Fraud rules also have true negatives as the most common category, but a significantly lower false negative rate compared to true positives. So these types are less likely to be missed by the agents. Compliance rules trigger the fewest total number of alerts; the rates of anything except a false negative are all low (81% of these alerts are benign).
Step20: Grouping by type: fraud rules have the best performance across all three metrics by a significant margin. Financial risk has comparable precision, but much worse recall. Compliance is poor across the board.
Step21: This will be the second set of plots in our dashboard. This shows the results over an expanding window covering the full length of time in the dataset, where the value of the three metrics (precision, recall, F1) track how the rules are performing with respect to the analysts and true outcomes over time.
Step22: Six out of the thirty rules have a variation $\Delta_\mathrm{max,abs} < 0.1$ in the second half of the current data. Of those, two (Rules 7 and 26) have only a handful of datapoints and estimates of the true accuracy are very uncertain. Two others (Rules 2 and 30) more data, although less than 100 points each. Rule 2 has very different behavior starting a few weeks toward the end, sharply increasing both its precision and recall. This could indicate either a difference in merchant tendencies or a re-definition of the existing rule. Rule 30 has shown a gradual improvement from an early nadir, which might be a sign of a set of bad/unlikely transactions earlier and now regressing to the mean. Rule 4 basically only has data in the second half of the set (not stabilized yet) and Rule 5 has a gradually decreasing recall, which may be a counterexample to the trend in Rule 30.
Step24: Cumulative performance of metrics split by rule type
Step25: Analysis
Step26: Rolling performance of metrics split by rule type
Step27: Co-occurence and effectiveness of rules
Step28: Rules 8, 14, 15, and 27 all have fairly strong co-occurrences with other rules in the set. These would be good candidates to check for the overall F1 scores and evaluate whether they're a necessary trigger for the system.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
import psycopg2
import pandas as pd # Requires v 0.18.0
import numpy as np
import seaborn as sns
sns.set_style("whitegrid")
# Connection settings for the local PostgreSQL database holding the risk data.
dbname = 'risk'
username = 'willettk'
# Note: password must be entered to run, but don't put this anywhere public.
psswd = ''
engine = create_engine('postgresql://%s:%s@localhost/%s'%(username,psswd,dbname))
# Check if database exists
database_exists(engine.url)
# Load the risk databases from CSV files
# (column names are lower-cased to match PostgreSQL's identifier folding).
cases = pd.read_csv('risk_data/cases.csv',
                    parse_dates=['alertDate'],
                    infer_datetime_format=True)
cases.rename(columns=lambda x: x.lower(), inplace=True)
print cases.dtypes
cases.head()
rules = pd.read_csv('risk_data/rules.csv')
rules.rename(columns=lambda x: x.lower(), inplace=True)
rules.head()
categories = pd.read_csv('risk_data/ruleCategories.csv')
categories.rename(columns=lambda x: x.lower(), inplace=True)
categories.head()
# Insert tables into PostgreSQL
cases.to_sql('cases', engine, if_exists='replace', index=False)
rules.to_sql('rules', engine, if_exists='replace', index=False)
categories.to_sql('categories', engine, if_exists='replace', index=False)
# As when setting up PSQL, the connection will need the password for the database entered here
con = psycopg2.connect(database = dbname, user = username, host='localhost', password=psswd)
# How many different rules are there, grouped by type?
# NOTE(review): the triple-quote delimiters around the SQL string literals in
# this export appear to have been stripped; as written these assignments are
# not valid Python until the quotes are restored.
sql_query =
SELECT ruletype,COUNT(ruletype)
FROM categories
GROUP BY ruleType;
pd.read_sql_query(sql_query,con).head()
# Are there cases triggered without any money involved in the transaction?
sql_query =
SELECT COUNT(caseid)
FROM cases
WHERE amount = 0;
pd.read_sql_query(sql_query,con).head()
# Distribution of money involved per triggered case, on a log scale.
# The +1 inside the log keeps the zero-amount cases finite (log10(1) = 0).
pl = np.log10(cases.amount+1).hist(bins=50)
# Fix: the axis label previously read "Transaction mount" (typo for "amount").
pl.set_xlabel("log(Transaction amount per triggered case [$])")
pl.set_ylabel("Count")
# Dashed red line marks the median transaction amount.
pl.axvline(np.log10(cases.amount.median()),color='r',lw=2,ls='--')
pl.set_title("Median transaction is ${:.2f}".format(cases.amount.median()));
cases.amount.max()
# What are the distributions of outcomes with regard to holds and bad merchants?
# (2x2 confusion-matrix counts over held x badmerch.)
sql_query =
SELECT held, badmerch, COUNT(badmerch) as c
FROM cases
GROUP BY held,badmerch;
p = pd.read_sql_query(sql_query,con)
p.head()
# How many total cases are there?
print "Total number of cases in this data set: {}".format(len(cases))
# Does the number of rules violations equal the number of helds?
print len(rules)
print sum(cases.held)
# Are there rules violations that don't correspond to cases in the table?
sql_query =
SELECT COUNT(rules.caseid)
FROM rules
LEFT JOIN cases ON cases.caseid = rules.caseid
WHERE cases.caseid IS NULL;
pd.read_sql_query(sql_query,con).head()
# Look at the distribution of rule types for benign cases
# (cases that were neither held nor from a bad merchant).
sql_query =
SELECT ruletype,sum(count) FROM
    (SELECT X.count, categories.ruletype FROM
        (SELECT rules.ruleid, COUNT(rules.ruleid)
         FROM rules
         LEFT JOIN cases ON cases.caseid = rules.caseid
         WHERE cases.held = 0
         AND cases.badmerch = 0
         GROUP BY rules.ruleid) X
    JOIN categories ON categories.ruleid = X.ruleid
    ) Y
GROUP BY ruletype
;
ruletypes_clean = pd.read_sql_query(sql_query,con)
ax = sns.barplot(x="ruletype", y="sum", data=ruletypes_clean)
# Helper functions for scoring rule performance from confusion-matrix counts.
def get_precision(TP, FP):
    """Precision: fraction of held cases that were genuinely bad."""
    return float(TP) / (TP + FP)

def get_recall(TP, FN):
    """Recall: fraction of genuinely bad cases that were held."""
    return float(TP) / (TP + FN)

def get_accuracy(TP, FP, TN, FN):
    """Accuracy: fraction of all cases classified correctly."""
    correct = TP + TN
    total = TN + FN + FP + TP
    return float(correct) / total

def get_f1(TP, FP, TN, FN):
    """F1 score: harmonic mean of precision and recall."""
    p = get_precision(TP, FP)
    r = get_recall(TP, FN)
    return 2 * p * r / (p + r)
# Print metrics for entire dataset
# p.c holds the four confusion-matrix counts in GROUP BY order
# (held,badmerch) = (0,0),(0,1),(1,0),(1,1) -> (TN, FN, FP, TP); dividing by
# the total converts counts to rates before scoring.
TN,FN,FP,TP = p.c / sum(p.c)
print "Precision: {:.3f}".format(get_precision(TP,FP))
print "Recall: {:.3f}".format(get_recall(TP,FN))
print "Accuracy: {:.3f}".format(get_accuracy(TP,FP,TN,FN))
print "F1: {:.3f}".format(get_f1(TP,FP,TN,FN))
# One row per (rule, case) with a human-readable outcome label and the
# triggering rule's type.
sql_query =
SELECT X.ruleid, X.caseid, X.outcome, categories.ruletype FROM
    (SELECT rules.ruleid, rules.caseid,
    CASE
        WHEN cases.held = 0 and cases.badMerch = 0 THEN 'not held, good'
        WHEN cases.held = 0 and cases.badMerch = 1 THEN 'not held, bad'
        WHEN cases.held = 1 and cases.badMerch = 0 THEN 'held, good'
        WHEN cases.held = 1 and cases.badMerch = 1 THEN 'held, bad'
    END outcome
    FROM rules
    LEFT JOIN cases ON cases.caseid = rules.caseid
    ) X
JOIN categories ON categories.ruleid = X.ruleid
;
allcases = pd.read_sql_query(sql_query,con)
fig,ax = plt.subplots(1,1,figsize=(10,6))
sns.countplot(x="ruletype", hue="outcome", data=allcases, ax=ax);
# Percentage of each outcome within each rule type.
for g in allcases.groupby("ruletype"):
    for gg in g[1].groupby("outcome"):
        print "{:15}, {:15}, {:2.1f}%".format(g[0],gg[0],len(gg[1]) * 100./len(g[1]))
    print ""
# Retrieve the outcomes of all triggered cases and encode those outcomes as numeric data
# (0 = TN, 1 = FN, 2 = FP, 3 = TP).
sql_query =
SELECT X.ruleid, X.caseid, X.outcome, categories.ruletype FROM
    (SELECT rules.ruleid, rules.caseid,
    CASE
        WHEN cases.held = 0 and cases.badMerch = 0 THEN 0
        WHEN cases.held = 0 and cases.badMerch = 1 THEN 1
        WHEN cases.held = 1 and cases.badMerch = 0 THEN 2
        WHEN cases.held = 1 and cases.badMerch = 1 THEN 3
    END outcome
    FROM rules
    LEFT JOIN cases ON cases.caseid = rules.caseid
    ) X
JOIN categories ON categories.ruleid = X.ruleid
;
all_numeric = pd.read_sql_query(sql_query,con)
# Plot results as a grid of bar charts, separated by rule.
# Color indicates the overall rule type
# Order the panels so rules of the same type appear together.
ruleorder = list(categories[categories.ruletype=="Fraud"].ruleid.values) + \
            list(categories[categories.ruletype=="Financial Risk"].ruleid.values) + \
            list(categories[categories.ruletype=="Compliance"].ruleid.values)
grid = sns.FacetGrid(all_numeric,
                     col="ruleid",
                     hue="ruletype",
                     col_order = ruleorder,
                     col_wrap=8, size=2, aspect=1,
                     xlim=(0,3))
grid.map(plt.hist, "outcome", normed=True)
grid.set(xticks=[0,1,2,3])
grid.set_xticklabels(['TN','FN','FP','TP']);
# Build a long-format frame of (metric, value, ruleid) triples; each metric
# is zeroed when undefined (no true positives or an empty denominator).
metric,value,ruleid = [],[],[]
for g in all_numeric.groupby('ruleid'):
    outcomes = {}
    for gg in g[1].groupby('outcome'):
        outcomes[gg[0]] = len(gg[1])
    TN,FN,FP,TP = [outcomes.setdefault(i, 0) for i in range(4)]
    p_ = get_precision(TP,FP) if (TP + FP) > 0 and TP > 0 else 0.
    r_ = get_recall(TP,FN) if (TP + FN) > 0 and TP > 0 else 0.
    if p_ > 0. and r_ > 0.:
        f_ = get_f1(TP,FP,TN,FN)
    else:
        f_ = 0.
    value.append(p_)
    value.append(r_)
    value.append(f_)
    metric.append('precision')
    metric.append('recall')
    metric.append('f1')
    ruleid.extend([g[0],]*3)
m = pd.DataFrame(index = range(len(metric)))
m['metric'] = pd.Series(metric)
m['value'] = pd.Series(value)
m['ruleid'] = pd.Series(ruleid)
# Plot the metrics for the overall data split by rule
grid = sns.FacetGrid(m,
                     col="ruleid",
                     col_wrap=8, size=2, aspect=1)
grid.map(sns.barplot, "metric","value","metric",palette=sns.color_palette("Set1"))
grid.map(plt.axhline, y=0.5, ls="--", c="0.5",lw=1);
# Plot the counts of each outcome split by rule type.
grid = sns.FacetGrid(all_numeric,
                     col="ruletype", hue="outcome",
                     col_wrap=3, size=5, aspect=1,
                     xlim=(0,3))
grid.map(plt.hist, "outcome")
grid.set(xticks=[0,1,2,3,4])
grid.set_xticklabels(['TN','FN','FP','TP']);
# Calculate precision, recall, F1 for data by rule type
# (same long-format construction as the per-rule version above, but grouped
# by ruletype instead of ruleid).
rt_metric,rt_value,rt_ruletype = [],[],[]
for g in all_numeric.groupby('ruletype'):
    outcomes = {}
    for gg in g[1].groupby('outcome'):
        outcomes[gg[0]] = len(gg[1])
    TN,FN,FP,TP = [outcomes.setdefault(i, 0) for i in range(4)]
    p_ = get_precision(TP,FP) if (TP + FP) > 0 and TP > 0 else 0.
    r_ = get_recall(TP,FN) if (TP + FN) > 0 and TP > 0 else 0.
    if p_ > 0. and r_ > 0.:
        f_ = get_f1(TP,FP,TN,FN)
    else:
        f_ = 0.
    rt_value.append(p_)
    rt_value.append(r_)
    rt_value.append(f_)
    rt_metric.append('precision')
    rt_metric.append('recall')
    rt_metric.append('f1')
    rt_ruletype.extend([g[0],]*3)
rtm = pd.DataFrame(index = range(len(rt_metric)))
rtm['metric'] = pd.Series(rt_metric)
rtm['value'] = pd.Series(rt_value)
rtm['ruletype'] = pd.Series(rt_ruletype)
# Plot the overall precision, recall, F1 for the dataset split by rule type
grid = sns.FacetGrid(rtm,
                     col="ruletype",
                     col_wrap=3, size=5, aspect=1)
grid.map(sns.barplot, "metric","value","metric",palette=sns.color_palette("Set1"))
grid.map(plt.axhline, y=0.5, ls="--", c="0.5",lw=1);
# Compute precision, recall, and F1 over an expanding time window
def ex_precision(ts):
    """Precision = TP / (TP + FP) over all rows of ts (0. if nothing held).

    ts must carry integer/boolean 'held' and 'badmerch' columns.
    """
    TP = (ts.badmerch & ts.held).sum()
    FP = (ts.held & np.logical_not(ts.badmerch)).sum()
    if (TP + FP) > 0.:
        return TP * 1. / (TP + FP)
    return 0.

def ex_recall(ts):
    """Recall = TP / (TP + FN) over all rows of ts (0. if no bad merchants)."""
    TP = (ts.badmerch & ts.held).sum()
    FN = (ts.badmerch & np.logical_not(ts.held)).sum()
    if (TP + FN) > 0.:
        return TP * 1. / (TP + FN)
    return 0.

def ex_f1(ts):
    """F1 = 2*TP / (2*TP + FP + FN) over all rows of ts (0. if undefined)."""
    TP = (ts.badmerch & ts.held).sum()
    FP = (ts.held & np.logical_not(ts.badmerch)).sum()
    FN = (ts.badmerch & np.logical_not(ts.held)).sum()
    num = 2 * TP
    den = 2 * TP + FP + FN
    if den > 0.:
        return num * 1. / den
    return 0.

def make_expanded(ts, window=1):
    """Evaluate precision/recall/F1 on expanding prefixes of ts.

    For each i the metrics are computed on ts.iloc[:i+window] and stamped
    with the timestamp ts.index[i+window], giving len(ts)-window rows.

    Raises ValueError (via pd.concat on an empty list) when ts has <= window
    rows; callers rely on this to skip rules with too few alerts.

    Fix: the result index was hard-coded as ts.index[1:], which raised a
    length-mismatch ValueError for any window != 1. It is now
    ts.index[window:], identical to the old behavior for the default window=1.
    """
    expanding_precision = pd.concat([(pd.Series(ex_precision(ts.iloc[:i + window]),
                                     index=[ts.index[i + window]])) for i in range(len(ts) - window)])
    expanding_recall = pd.concat([(pd.Series(ex_recall(ts.iloc[:i + window]),
                                  index=[ts.index[i + window]])) for i in range(len(ts) - window)])
    expanding_f1 = pd.concat([(pd.Series(ex_f1(ts.iloc[:i + window]),
                               index=[ts.index[i + window]])) for i in range(len(ts) - window)])
    ex = pd.DataFrame(data={"precision": expanding_precision.values,
                            "recall": expanding_recall.values,
                            "f1": expanding_f1.values,
                            },
                      index=ts.index[window:])
    return ex
# Run the expanded window for all cases, sorted by ruleid
sql_query =
SELECT cases.*,rules.ruleid
FROM cases
JOIN rules ON rules.caseid = cases.caseid
ORDER BY ruleid,alertdate
;
casejoined = pd.read_sql_query(sql_query,con)
# One expanding-window metric frame per rule, keyed by ruleid.
# make_expanded raises ValueError (via pd.concat) when a rule has too few
# alerts to form even one window; those rules are skipped.
exdict = {}
for g in casejoined.groupby("ruleid"):
    ruleid = g[0]
    df = g[1]
    ts = pd.DataFrame(data={"amount":df.amount.values,
                            "held":df.held.values,
                            "badmerch":df.badmerch.values},
                      index=df.alertdate.values)
    try:
        exdict[ruleid] = make_expanded(ts)
    except ValueError:
        print "No true positives in Rule {} ({} trigger); cannot compute expanded window.".format(ruleid,len(df))
ruleid = 4
# Quick code to make single plots for presentation
pl = sns.barplot(x="metric",y="value",data=m[m.ruleid==ruleid])
pl.axhline(y=0.5, ls="--", c="0.5",lw=1)
pl.set_title("RuleID = {}".format(ruleid),fontsize=20);
pl = exdict[ruleid].plot(legend=True)
pl.set_title("RuleID = {}".format(ruleid),fontsize=20)
pl.set_ylim(0,1.05)
pl.set_ylabel("metrics",fontsize=12);
# Plot results in a grid
fig,axarr = plt.subplots(5,6,figsize=(15,15))
rules_sorted = sorted(exdict.keys())
for ruleid,ax in zip(rules_sorted,axarr.ravel()):
    ex = exdict[ruleid]
    # Only the rule-6 panel carries a legend, to avoid cluttering the grid.
    pl = ex.plot(ax=ax,legend=(False | ruleid==6))
    pl.set_title("ruleid = {}".format(ruleid))
    pl.set_ylim(0,1.05)
    pl.set_xticklabels([""])
# Rank rule performance by deltamax: the largest absolute deviation in the second half of the dataset.
l = []
for ruleid in exdict:
    ex = exdict[ruleid]
    ex_2ndhalf = ex.iloc[len(ex)//2:]
    f1diff = (ex_2ndhalf.f1.max() - ex_2ndhalf.f1.min())
    if np.isfinite(f1diff):
        l.append((ruleid,f1diff,len(ex_2ndhalf)))
    else:
        print "No variation for Rule {:2} in the second half (median is zero).".format(ruleid)
lsorted = sorted(l, key=lambda x: x[1],reverse=True)
for ll in lsorted:
    print "Rule {:2} varies by {:.2f} in the second half ({:4} data points)".format(*ll)
# Sort and print the rules matching the criteria for stability and high performance.
# "Stable" = F1 varied by < 0.1 over the second half AND more than 10 points
# in that half; "good" = final F1 >= 0.5.
stable_good = []
stable_bad = []
unstable = []
for ruleid in exdict:
    ex = exdict[ruleid]
    ex_2ndhalf = ex.iloc[len(ex)//2:]
    deltamax = (ex_2ndhalf.f1.max() - ex_2ndhalf.f1.min())
    f1 = ex.iloc[len(ex)-1].f1
    stable = True if deltamax < 0.1 and len(ex)//2 > 10 else False
    good = True if f1 >= 0.5 else False
    if stable and good:
        stable_good.append(ruleid)
    elif stable:
        stable_bad.append(ruleid)
    else:
        unstable.append(ruleid)
print "{:2} rules {} are performing well.".format(len(stable_good),stable_good)
print "{:2} rules {} are not performing well.".format(len(stable_bad),stable_bad)
print "{:2} rules {} are unstable and cannot be evaluated yet.".format(len(unstable),unstable)
# Compute the change in performance by rule type over an expanding time window
sql_query =
SELECT cases.*,categories.ruletype
FROM cases
JOIN rules ON rules.caseid = cases.caseid
JOIN categories on categories.ruleid = rules.ruleid
ORDER BY categories.ruletype,alertdate
;
rtjoined = pd.read_sql_query(sql_query,con)
# Get the dataframes
rtd = {}
for g in rtjoined.groupby("ruletype"):
    ruletype = g[0]
    df = g[1]
    ts = pd.DataFrame(data={"amount":df.amount.values,
                            "held":df.held.values,
                            "badmerch":df.badmerch.values},
                      index=df.alertdate.values)
    try:
        rtd[ruletype] = make_expanded(ts)
    except ValueError:
        print "Problems with {}".format(ruletype)
# Plot results in a grid
fig,axarr = plt.subplots(1,3,figsize=(15,6))
rules_sorted = sorted(rtd.keys())
for ruletype,ax in zip(rules_sorted,axarr.ravel()):
    ex = rtd[ruletype]
    pl = ex.plot(ax=ax)
    pl.set_title("ruletype = {}".format(ruletype))
    pl.set_ylim(0,1.05)
# Rank rules by the largest absolute deviation in the second half of the dataset.
l = []
for ruletype in rtd:
    ex = rtd[ruletype]
    ex_2ndhalf = ex.iloc[len(ex)//2:]
    f1diff = (ex_2ndhalf.f1.max() - ex_2ndhalf.f1.min())
    l.append((ruletype,f1diff,len(ex_2ndhalf)))
print ''
lsorted = sorted(l, key=lambda x: x[1],reverse=True)
for ll in lsorted:
    print "{:15} rules vary by {:.2f} in the second half ({:4} data points)".format(*ll)
# Case-level time series for the whole dataset.
# NOTE(review): `r` below is never used before being shadowed by the loop
# variable in the next cell — candidate for removal.
ts = pd.DataFrame(data={"amount":cases.amount.values,
                        "held":cases.held.values,
                        "badmerch":cases.badmerch.values},
                  index=cases.alertdate.values)
r = ts.rolling(window=7,min_periods=1)
# Make a rolling window with associated metrics by looping over every row in the dataframe
def r_precision(ts):
    """Precision = TP / (TP + FP) within one window (np.nan if nothing held)."""
    TP = (ts.badmerch & ts.held).sum()
    FP = (ts.held & np.logical_not(ts.badmerch)).sum()
    if (TP + FP) > 0.:
        return TP * 1. / (TP + FP)
    return np.nan

def r_recall(ts):
    """Recall = TP / (TP + FN) within one window (np.nan if no bad merchants)."""
    TP = (ts.badmerch & ts.held).sum()
    FN = (ts.badmerch & np.logical_not(ts.held)).sum()
    if (TP + FN) > 0.:
        return TP * 1. / (TP + FN)
    return np.nan

def r_f1(ts):
    """F1 = 2*TP / (2*TP + FP + FN) within one window (np.nan if undefined)."""
    TP = (ts.badmerch & ts.held).sum()
    FP = (ts.held & np.logical_not(ts.badmerch)).sum()
    FN = (ts.badmerch & np.logical_not(ts.held)).sum()
    num = 2 * TP
    den = 2 * TP + FP + FN
    if den > 0.:
        return num * 1. / den
    return np.nan

def make_rolling(ts, window):
    """Evaluate precision/recall/F1 on sliding windows of ts.

    Window i covers ts.iloc[i:i+window] and is stamped with ts.index[i+window].

    Fix: the recall pass previously sliced the *global* ``ts_sorted`` instead
    of the ``ts`` argument, so it silently scored whatever ``ts_sorted``
    happened to be in the calling scope (or raised NameError). All three
    passes now use ts; callers that passed ts_sorted see identical results.
    """
    rolling_precision = pd.concat([(pd.Series(r_precision(ts.iloc[i:i + window]),
                                    index=[ts.index[i + window]])) for i in range(len(ts) - window)])
    rolling_recall = pd.concat([(pd.Series(r_recall(ts.iloc[i:i + window]),
                                 index=[ts.index[i + window]])) for i in range(len(ts) - window)])
    rolling_f1 = pd.concat([(pd.Series(r_f1(ts.iloc[i:i + window]),
                             index=[ts.index[i + window]])) for i in range(len(ts) - window)])
    r = pd.DataFrame(data={"precision": rolling_precision.values,
                           "recall": rolling_recall.values,
                           "f1": rolling_f1.values,
                           },
                     index=rolling_f1.index)
    return r
# Run the rolling window for all cases, sorted by rule
rdict = {}
for g in casejoined.groupby("ruleid"):
    ruleid = g[0]
    df = g[1]
    ts = pd.DataFrame(data={"amount":df.amount.values,
                            "held":df.held.values,
                            "badmerch":df.badmerch.values},
                      index=df.alertdate.values)
    # Windows must be in time order, so sort by the datetime index first.
    ts_sorted = ts.sort_index()
    try:
        rdict[ruleid] = make_rolling(ts_sorted,window=50)
    except ValueError:
        print "No true positives in Rule {} over interval ({} triggers); cannot compute rolling window.".format(ruleid,len(df))
        # Empty dataframe
        rdict[ruleid] = pd.DataFrame([0,]*len(df),index=[[casejoined.alertdate.min(),]*(len(df)-1) + [casejoined.alertdate.max()]])
# Plot the dashboard with rolling windows
fig,axarr = plt.subplots(5,6,figsize=(15,12))
for ax,r in zip(axarr.ravel(),rdict):
    rp = rdict[r].plot(xlim=(casejoined.alertdate.min(),casejoined.alertdate.max()),
                       ylim=(0,1.05),
                       ax=ax,
                       legend=(False | r == 1))
    # Drop x tick labels on most panels to reduce clutter.
    if r < 25:
        rp.set_xticklabels([""])
    rp.set_title("ruleid = {}; N={}".format(r,len(rdict[r])));
# Same rolling analysis, but by rule type
# (wider 200-sample windows for the pooled per-type series).
rtrdict = {}
for g in rtjoined.groupby("ruletype"):
    ruleid = g[0]
    df = g[1]
    ts = pd.DataFrame(data={"amount":df.amount.values,
                            "held":df.held.values,
                            "badmerch":df.badmerch.values},
                      index=df.alertdate.values)
    ts_sorted = ts.sort_index()
    try:
        rtrdict[ruleid] = make_rolling(ts_sorted,window=200)
    except ValueError:
        print "No true positives in Rule {} over interval ({} triggers); cannot compute rolling window.".format(ruleid,len(df))
        # Empty dataframe
        rtrdict[ruleid] = pd.DataFrame([0,]*len(df),index=[[casejoined.alertdate.min(),]*(len(df)-1) + [casejoined.alertdate.max()]])
# Plot the dashboard with rolling windows by rule type
fig,axarr = plt.subplots(1,3,figsize=(15,6))
for ax,r in zip(axarr.ravel(),["Compliance","Financial Risk","Fraud"]):
    rp = rtrdict[r].plot(xlim=(rtjoined.alertdate.min(),rtjoined.alertdate.max()),
                         ylim=(0,1.05),
                         ax=ax)
    rp.set_title("Rule type = {}; N={}".format(r,len(rtrdict[r])));
# Compute the co-occurrence matrix for triggering rules
# One row per case, one indicator column per rule (1 if that rule fired).
df = pd.DataFrame(index=rules.caseid.unique())
rule_count_arr = np.zeros((len(rules.caseid.unique()),30),dtype=int)
for idx,g in enumerate(rules.groupby('caseid')):
    g1 = g[1]
    for r in g1.ruleid.values:
        # Numbering is a little off because there's no Rule 28 in the dataset.
        if r < 28:
            rule_count_arr[idx,r-1] = 1
        else:
            rule_count_arr[idx,r-2] = 1
# Create pandas DataFrame and rename the columns to the actual rule IDs
df = pd.DataFrame(data=rule_count_arr,
                  index=rules.caseid.unique(),
                  columns=[sorted(rules.ruleid.unique())])
# Co-occurrence matrix is the product of the matrix and its transpose
coocc = df.T.dot(df)
coocc.head()
# Plot the co-occurrence matrix and mask the diagonal and upper triangle values
# (mirrored on the bottom half of the matrix)
fig,ax = plt.subplots(1,1,figsize=(14,10))
mask = np.zeros_like(coocc)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style("white"):
    sns.heatmap(coocc,
                mask = mask,
                annot=True, fmt="d",
                vmax = 100,
                square=True,
                ax=ax)
ax.set_xlabel('Rule',fontsize=16)
ax.set_ylabel('Rule',fontsize=16);
# How much money did bad transactions cost Insight in this dataset?
# (bad merchants whose funds were NOT held = realized losses)
print "Bad money in transactions totals ${:.2f}.".format(cases[(cases.held == 0) & (cases.badmerch == 1)].amount.sum())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Simulate a 20Hz sawtooth wave
Step2: 2. Filter in 13-30Hz band
Step3: 3. Calculate instantaneous measures
Step4: 4. Visualize
Step5: 5. Example with real data
Step6: 6. Plot relationship between inst. amp. and inst. freq.
|
<ASSISTANT_TASK:>
Python Code:
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
# Define sawtooth shape in some number of samples
x1 = np.array([0,.05,.2,1,.9,.8,.7,.6,.5,.4,.3,.2,.1,.05,.01])
t1 = np.arange(len(x1))
# Interpolate sawtooth so it has 50 samples (50ms = 20Hz wave)
from scipy import interpolate
f = interpolate.interp1d(t1, x1)
t2 = np.linspace(0,len(t1)-1,50)
x2 = f(t2)
# Tile the new sawtooth to last 5 seconds
x = np.tile(x2,100)
# Remove the DC offset so the wave is centered on zero.
x = x - np.mean(x)
# Sampling rate (Hz) and matching time axis for the 5-second signal.
Fs = 1000.
t = np.arange(0,5,.001)
# Plot sawtooth
plt.figure(figsize=(5,2))
plt.plot(t, x)
plt.ylim((-.7,.7))
plt.xlim((0,.5))
plt.xlabel('Time (s)')
plt.ylabel('Voltage (a.u.)')
# Band-pass filter into the 13-30 Hz band with the misshapen toolbox, then
# take the Hilbert transform's magnitude and angle as instantaneous
# amplitude and phase.
from misshapen import nonshape
x_filt, _ = nonshape.bandpass_default(x, (13,30),Fs, w=1.5,rmv_edge=False)
x_amp = np.abs(sp.signal.hilbert(x_filt))
x_phase = np.angle(sp.signal.hilbert(x_filt))
# Instantaneous freq
# Phase derivative, shifted past the -pi/pi wrap-around and converted from
# rad/sample to Hz.
x_freq = np.diff(x_phase)
x_freq[x_freq<0] = x_freq[x_freq<0]+2*np.pi
x_freq = x_freq*Fs/(2*np.pi)
# Three-panel figure over a 200-ms excerpt (samples 1000-1200 at 1 kHz):
# raw/filtered/amplitude, instantaneous phase, and instantaneous frequency.
samp_plt = range(1000,1200)
plt.figure(figsize=(5,5))
plt.subplot(3,1,1)
plt.plot(t[samp_plt],x[samp_plt],'k')
plt.plot(t[samp_plt],x_filt[samp_plt],'r')
plt.plot(t[samp_plt],x_amp[samp_plt],'b')
plt.xlim((t[samp_plt][0],t[samp_plt][-1]))
plt.xticks([])
plt.ylabel('raw (black)\nfiltered (red)\nInst. Amp. (blue)')
plt.subplot(3,1,2)
plt.plot(t[samp_plt],x_phase[samp_plt],'k')
plt.xlim((t[samp_plt][0],t[samp_plt][-1]))
plt.xticks([])
plt.ylabel('Inst. Phase (rad)')
plt.subplot(3,1,3)
plt.plot(t[samp_plt],x_freq[samp_plt],'k')
plt.xlim((t[samp_plt][0],t[samp_plt][-1]))
plt.xlabel('Time (s)')
plt.ylabel('Inst. Freq. (Hz)')
# Example with real data: repeat the same analysis on a recorded signal.
x2 = np.load('C:/gh/bv/misshapen/exampledata.npy')
from misshapen import nonshape
x2_filt, _ = nonshape.bandpass_default(x2, (13,30),Fs, w=3,rmv_edge=False)
x_amp = np.abs(sp.signal.hilbert(x2_filt))
x_phase = np.angle(sp.signal.hilbert(x2_filt))
# Instantaneous freq
x_freq = np.diff(x_phase)
x_freq[x_freq<0] = x_freq[x_freq<0]+2*np.pi
x_freq = x_freq*Fs/(2*np.pi)
# Four-panel figure over a 2-second excerpt; the bottom panel overlays
# instantaneous amplitude (black, left axis) and frequency (red, right axis).
samp_plt = range(3000,5000)
plt.figure(figsize=(10,6))
plt.subplot(4,1,1)
plt.plot(t[samp_plt],x2[samp_plt],'k')
plt.plot(t[samp_plt],x2_filt[samp_plt],'r')
plt.plot(t[samp_plt],x_amp[samp_plt],'b')
plt.xlim((t[samp_plt][0],t[samp_plt][-1]))
plt.xticks([])
plt.ylabel('raw (black)\nfiltered (red)\nInst. Amp. (blue)')
plt.subplot(4,1,2)
plt.plot(t[samp_plt],x_phase[samp_plt],'k')
plt.xlim((t[samp_plt][0],t[samp_plt][-1]))
plt.xticks([])
plt.ylabel('Inst. Phase (rad)')
plt.subplot(4,1,3)
plt.plot(t[samp_plt],x_freq[samp_plt],'k')
plt.xlim((t[samp_plt][0],t[samp_plt][-1]))
plt.xticks([])
plt.ylim((0,30))
plt.ylabel('Inst. Freq. (Hz)')
plt.subplot(4,1,4)
plt.plot(t[samp_plt],x_amp[samp_plt],'k')
plt.ylabel('Inst. Amp.')
plt.xlabel('Time (s)')
ax = plt.gca()
ax2 = ax.twinx()
ax2.plot(t[samp_plt],x_freq[samp_plt],'r')
plt.xlim((t[samp_plt][0],t[samp_plt][-1]))
plt.ylim((0,30))
plt.ylabel('Inst. Freq. (Hz)',color='r')
# Scatter of instantaneous amplitude vs. frequency (x_freq is one sample
# shorter than x_amp because of np.diff, hence x_amp[1:]).
plt.figure(figsize=(6,6))
plt.plot(x_freq, x_amp[1:],'k.',alpha=.01)
plt.xlim((0,100))
plt.xlabel('Inst. Freq. (Hz)')
plt.ylabel('Inst. Amp. (uV)')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unit Test
|
<ASSISTANT_TASK:>
Python Code:
def parentheses_util(no_left, no_right, pair_string, result):
    """Recursively build every valid string of balanced parentheses.

    no_left -- count of '(' characters still available to place
    no_right -- count of ')' characters still available to place
    pair_string -- the prefix built so far
    result -- set collecting each completed valid string
    """
    if no_left == 0 and no_right == 0:
        # All characters placed; the prefix is a complete valid pairing.
        result.add(pair_string)
        return
    if no_left > 0:
        # A '(' may always be opened while any remain.
        parentheses_util(no_left - 1, no_right, pair_string + '(', result)
    if no_right > no_left:
        # A ')' is legal only if it closes a previously opened '('.
        parentheses_util(no_left, no_right - 1, pair_string + ')', result)
def pair_parentheses(n):
    """Return the set of all valid strings made of n parenthesis pairs."""
    if n == 0:
        return set()
    combos = set()
    parentheses_util(n, n, '', combos)
    return combos
# %load test_n_pairs_parentheses.py
from nose.tools import assert_equal
class TestPairParentheses(object):
    """Unit test: compare solution(n) against the known answers for n = 0..3."""

    def test_pair_parentheses(self, solution):
        expected = [
            (0, set()),
            (1, set(['()'])),
            (2, set(['(())', '()()'])),
            (3, set(['((()))', '(()())', '(())()', '()(())', '()()()'])),
        ]
        for n, answer in expected:
            assert_equal(solution(n), answer)
        print('Success: test_pair_parentheses')
def main():
    """Entry point: run the unit test against pair_parentheses."""
    suite = TestPairParentheses()
    suite.test_pair_parentheses(pair_parentheses)

if __name__ == '__main__':
    main()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
# NOTE(review): load_data is defined elsewhere; presumably returns a 2-D
# numeric ndarray — confirm against its definition.
np_array = load_data()
scaler = MinMaxScaler()
# Flatten to a single column so the whole array shares one min/max scale,
# instead of MinMaxScaler's default of scaling each column independently.
X_one_column = np_array.reshape([-1, 1])
result_one_column = scaler.fit_transform(X_one_column)
# Restore the original shape after scaling.
transformed = result_one_column.reshape(np_array.shape)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: $\alpha$-CsCl ($Pm\overline{3}m$)
Step2: Compare it with the experimental XRD pattern below.
Step3: $\beta$-CsCl ($Fm\overline{3}m$)
Step4: Compare it with the experimental XRD pattern below.
|
<ASSISTANT_TASK:>
Python Code:
# Set up some imports that we will need
from pymatgen import Lattice, Structure
from pymatgen.analysis.diffraction.xrd import XRDCalculator
from IPython.display import Image, display
%matplotlib inline
# alpha-CsCl (Pm-3m): cubic cell with Cs at the corner and Cl at the body
# center (two atoms in the cell, per the coordinates below).
# Create CsCl structure
a = 4.209 #Angstrom
latt = Lattice.cubic(a)
structure = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
c = XRDCalculator()
# Simulated XRD pattern, compared below against the experimental card.
c.show_xrd_plot(structure)
display(Image(filename=('./PDF - alpha CsCl.png')))
# beta-CsCl (Fm-3m): 4 Cs + 4 Cl sites per conventional cubic cell.
# Create CsCl structure
a = 6.923 #Angstrom
latt = Lattice.cubic(a)
structure = Structure(latt, ["Cs", "Cs", "Cs", "Cs", "Cl", "Cl", "Cl", "Cl"],
    [[0, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5],
    [0.5, 0.5, 0.5], [0, 0, 0.5], [0, 0.5, 0], [0.5, 0, 0]])
c.show_xrd_plot(structure)
display(Image(filename=('./PDF - beta CsCl.png')))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How to test it
Step2: 1.2 Equal variance (if two or more groups)
Step3: How to test it
Step4: 1.3 i.i.d. (independent and identically distributed)
Step5: 1.4 No or little multicolinearity (for linear regression)
Step6: 2.2 Two independent samples
Step7: 2.3 Two paired samples
Step8: 2.4 What happens if they don't have equal variance? (But normality)
Step9: 2.5 What happens if they are not normally distributed (but equal variance)
Step10: 2.6 What happens if they are not normally distributed (but equal variance) and paired samples?
Step11: 2.6 What happens if they are not normally distributed and not equal variance?
Step12: 3.2 Problem with many groups (look-elsewhere bias)
Step13: 3.3 Compare which groups are different
Step14: 3.3.2 Tukey's HSD (honest significant difference) test. Best
Step15: 4. Fitting distributions
Step16: 5. Calculate Confidence Intervals with bootstrap methods
Step17: In class exercise
Step18: PartA. Compare the number of employees of industrial vs financial companies
Step19: PartB. Compare the market capitalization for different types of companies
|
<ASSISTANT_TASK:>
Python Code:
Image(url="https://kanbanize.com/blog/wp-content/uploads/2014/07/Standard_deviation_diagram.png", width=500)
def qq_plot(x):
    """Draw a normal Q-Q plot of *x*: observed quantiles against
    theoretical normal quantiles, plus probplot's least-squares line."""
    import scipy.stats
    (theo_q, obs_q), (m, b, _r) = scipy.stats.probplot(x, dist='norm', plot=None)
    fitted_line = m * theo_q + b
    plt.plot(theo_q, obs_q, '.', theo_q, fitted_line)
    plt.xlabel('Quantiles', fontsize=14)
    plt.ylabel('Quantiles Obs', fontsize=14)
x = np.random.randn(100)
plt.subplot(2,2,1)
qq_plot(x)
x = np.random.exponential(2,100)
plt.subplot(2,2,2)
qq_plot(x)
plt.tight_layout()
plt.show()
Image(url="http://goodsciencebadscience.nl/wp-content/uploads/2012/09/variances1.gif",width=500)
#random normal data (mean 0 std 1)
x = np.random.randn(100)
#random normal data (mean 0 std 2)
y = np.random.randn(100)*2
#Test
print(scipy.stats.levene(x,y))
print("These are the residuals")
Image(url="https://upload.wikimedia.org/wikipedia/commons/e/ed/Residuals_for_Linear_Regression_Fit.png",width=500)
Image(url="https://i.stack.imgur.com/RU17l.png")
from scipy.stats import norm
#Our sample, normally distributed with mean 0.1 and std 1
x = np.random.randn(100)+2
#histogram
plt.subplot(2,2,1)
sns.distplot(x,kde=False,fit=norm)
plt.subplot(2,2,2)
qq_plot(x)
plt.show()
print("-"*20)
print("Test for mean of group different than zero")
scipy.stats.ttest_1samp(x,popmean=0)
from scipy.stats import norm
#Our sample, normally distributed with mean 0.1 and std 1
x = np.random.randn(100)+0.1
#Our other sample, normally distributed with mean 0.5 and std 1
y = np.random.randn(100)+0.5
#Equal variance?
#We are testing if we can do the test
#If significant it means that the variances are different
print("Test for equal variance")
print(scipy.stats.levene(x,y))
#histogram
plt.subplot(2,2,1)
sns.distplot(x,kde=True)
sns.distplot(y,kde=True)
plt.subplot(2,2,2)
qq_plot(x)
qq_plot(y)
plt.tight_layout()
plt.show()
print("-"*20)
print("Test for different means")
scipy.stats.ttest_ind(x,y)
from scipy.stats import norm
#Our sample, normally distributed with mean 0.1 and std 1
x = np.random.randn(100)+0.1
#Our other sample, similar to x but adding 0.05 and some random noise
y = x+np.random.randn(100)/10 + 0.05
#Equal variance?
print("Test for equal variance")
print(scipy.stats.levene(x,y))
#histogram
plt.subplot(2,2,1)
sns.distplot(x,kde=True)
sns.distplot(y,kde=True)
plt.subplot(2,2,2)
qq_plot(x)
qq_plot(y)
plt.tight_layout()
plt.show()
print("WRONG: What would happen if we don't use paired t-test")
print(scipy.stats.ttest_ind(x,y))
print("-"*20)
print("Test for different means")
scipy.stats.ttest_rel(x,y)
scipy.stats.ttest_rel?
#Our sample, normally distributed with mean 0.1 and std 1
x = np.random.randn(100)+0.1
#Our other sample, normally distributed with mean 0.2 and std 2
y = np.random.randn(50)*2+0.2
#Equal variance?
print("Test for equal variance")
print(scipy.stats.levene(x,y))
#histogram
plt.subplot(2,2,1)
sns.distplot(x,kde=True)
sns.distplot(y,kde=True)
plt.subplot(2,2,2)
qq_plot(x)
qq_plot(y)
plt.tight_layout()
plt.show()
print("WRONG: What happens if we try the test assumming equal variance")
print(scipy.stats.ttest_ind(x,y))
print("-"*20)
print("Test for different means with different variance")
scipy.stats.ttest_ind(x,y,equal_var=False)
#Our sample, exponentially distributed (scale 2)
x = np.random.exponential(2,100)
#Our other sample, exponentially distributed (scale 2) + 0.1
y = np.random.exponential(2,100)+0.1
#Equal variance?
print("Test for equal variance")
print(scipy.stats.levene(x,y))
#histogram
plt.subplot(2,2,1)
sns.distplot(x,kde=True)
sns.distplot(y,kde=True)
plt.subplot(2,2,2)
qq_plot(x)
qq_plot(y)
plt.show()
print("WRONG: What happens if we try t-test")
print(scipy.stats.ttest_ind(x,y))
print("-"*20)
print("Test for different means with no normal distributions, equal variance")
scipy.stats.mannwhitneyu(x,y)
#Our sample, normally distributed with mean 0.1 and std 1
x = np.random.random(100)
#Our other sample, normally distributed with mean 0.05 and std 1 and some random noise
y = x+np.random.randn(100)/10 + 0.05
#Equal variance?
print("Test for equal variance")
print(scipy.stats.levene(x,y))
#histogram
plt.subplot(2,2,1)
sns.distplot(x,kde=True)
sns.distplot(y,kde=True)
plt.subplot(2,2,2)
qq_plot(x)
qq_plot(y)
plt.show()
print("WRONG: What happens if we try t-test")
print(scipy.stats.ttest_rel(x,y))
print("-"*20)
print("Test for different means with no normal distributions and equal variance")
print(scipy.stats.wilcoxon(x,y))
#Our sample, normally distributed with mean 0.1 and std 1
x = np.random.randn(100)+0.1
#Our other sample, normally distributed with mean 0.2 and std 1
y = np.random.randn(100)+0.2
#Our other sample, normally distributed with mean 0.3 and std 1
z = np.random.randn(100)+0.3
#Equal variance?
print("Test for equal variance")
print(scipy.stats.levene(x,y,z))
#histogram
plt.subplot(2,2,1)
sns.distplot(x,kde=True)
sns.distplot(y,kde=True)
sns.distplot(z,kde=True)
plt.subplot(2,2,2)
qq_plot(x)
qq_plot(y)
qq_plot(z)
plt.show()
print("-"*20)
print("ONE-WAY ANOVA")
print(scipy.stats.f_oneway(x, y, z) )
print("-"*20)
print("ONE-WAY KRUSKAL WALLIS") #use if not-normal and equal variance
print(scipy.stats.mstats.kruskalwallis(x,y,z))
#Let's compare two identical samples, see how often we get false results
for i in range(100):
x = np.random.randn(100)
y = np.random.randn(100)
stat, pvalue = scipy.stats.ttest_ind(x,y)
if pvalue < 0.05:
print(i,pvalue)
#Our sample, normally distributed with mean 0.1 and std 1
x = np.random.randn(100)+0.1
#Our other sample, normally distributed with mean 0.2 and std 1
y = np.random.randn(100)+0.2
#Our other sample, normally distributed with mean 0.3 and std 1
z = np.random.randn(100)+0.3
#Our other sample, normally distributed with mean 0.5 and std 1
w = np.random.randn(100)+0.5
print("New threshold (instead of 0.05) = ",0.05/6)
#Compare x and y
print("x and y: ", scipy.stats.ttest_ind(x,y))
#Compare x and z
print("x and z: ", scipy.stats.ttest_ind(x,z))
#Compare x and w
print("x and w: ", scipy.stats.ttest_ind(x,w))
#Compare y and z
print("y and z: ", scipy.stats.ttest_ind(y,z))
#Compare y and w
print("y and w: ", scipy.stats.ttest_ind(y,w))
#Compare z and w
print("z and w: ", scipy.stats.ttest_ind(z,w))
from statsmodels.stats.multicomp import pairwise_tukeyhsd
df = pd.read_csv("data/tukey_example.csv")
df.sample(10)
res2 = pairwise_tukeyhsd(df["productivity"],df["group"])
print(res2)
res2.plot_simultaneous(comparison_name=None,xlabel='diffs',ylabel='Group')
plt.show()
from scipy.stats import lognorm,norm,expon,powerlaw
x = np.random.lognormal(1,0.8,3000)
plt.subplot(2,2,1)
sns.distplot(x,fit=norm,kde=False)
plt.title("norm")
plt.subplot(2,2,2)
sns.distplot(x,fit=lognorm,kde=False)
plt.title("lognorm")
plt.subplot(2,2,3)
sns.distplot(x,fit=expon,kde=False)
plt.title("expon")
plt.subplot(2,2,4)
sns.distplot(x,fit=powerlaw,kde=False)
plt.title("powerlaw")
plt.tight_layout()
plt.show()
import scikits.bootstrap as bootstrap
#Bootstrap of mean and std
x = np.random.randn(100)*3 #this could be one of our columns
CIs = bootstrap.ci(x, statfunction=np.mean,n_samples=100000)
print('CI for mean with bootstrapping = ', CIs)
CIs = bootstrap.ci(x, statfunction=np.std,n_samples=100000)
print('CI for std with bootstrapping = ', CIs)
x = np.random.randn(100)*3
np.mean(x),np.std(x)
CIs = bootstrap.ci(x, statfunction=np.mean,n_samples=100000)
print('CI for mean with bootstrapping = ', CIs)
scipy.stats.ttest_1samp(x,popmean=0)
#Bootstrap of p-value
def return_pvalue(x):
    """One-sample t-test p-value of *x* against a population mean of 0.

    Used as the statistic function for bootstrap confidence intervals.
    """
    _stat, p = scipy.stats.ttest_1samp(x, 0)
    return p
CIs = bootstrap.ci(x, statfunction=return_pvalue, n_samples=10000)
print('CI for p-value with bootstrapping = ', CIs)
df = pd.read_csv("data/big3_position.csv",sep="\t")
df.head()
industrial = df.loc[df["TypeEnt"]=="Industrial company"]
financial = df.loc[df["TypeEnt"]=="Financial company"]
#Employees (like most is lognormally distributed -> we can convert it to a normal distribution and run our tests happily)
i = np.log(industrial["Employees"].dropna())
f = np.log(financial["Employees"].dropna())
from scipy.stats import lognorm,norm
plt.figure(figsize=(10,3))
plt.subplot(1,2,1)
sns.distplot(df["Employees"].dropna(),fit=lognorm,kde=False)
plt.subplot(1,2,2)
sns.distplot(np.log(df["Employees"].dropna()),fit=norm,kde=False)
i.head()
f.head()
#Check if our distributions have the same variance
scipy.stats.levene(i,f)
sns.distplot(i)
sns.distplot(f)
#Plot histograms and qq_plots for i and f
qq_plot(i)
qq_plot(f)
# Run the correct statistical test to know if the
# number of employees of financial and industrial companies is different.
# BUG FIX: scipy.stats has no `man` attribute; the non-parametric
# two-sample test intended here (used earlier in this notebook for
# non-normal, equal-variance samples) is mannwhitneyu.
scipy.stats.mannwhitneyu(i, f)
df = pd.read_csv("data/big3_position.csv",sep="\t")
df.head()
#Create a new column that is the logarithm of "MarketCap", call it "logMarketCap"
df["logMarketCap"] = np.log(df["MarketCap"])
df.head()
df.describe()
#Replace infinite values (log(0) = infinite) with nan
df = df.replace([np.inf, -np.inf], np.nan)
#Drop na values if "logMarketCap" is na
df = df.dropna(subset=["logMarketCap"])
df.describe()
#Find the distribution of "MarketCap" and "logMarketCap", fitting the rigth distribution
from scipy.stats import norm,lognorm,expon
sns.distplot(df["MarketCap"],fit=lognorm,kde=False)
#Find the distribution of "MarketCap" and "logMarketCap", fitting the rigth distribution
from scipy.stats import norm,lognorm,expon
sns.distplot(df["logMarketCap"],fit=norm,kde=False)
#Keep only three types of entities
df = df.loc[df["TypeEnt"].isin(["Financial company","Industrial company","Bank"])]
#Plot distributions for each type
plt.figure(figsize=(6,4))
for typeent,group in df.groupby("TypeEnt"):
sns.distplot(group["logMarketCap"],kde=False,norm_hist=True,label=typeent)
plt.legend()
#Run ANOVA
bank = df.loc[df["TypeEnt"] == "Bank"]
bank_values = bank["logMarketCap"]
ind = df.loc[df["TypeEnt"] == "Industrial company"]
ind_values = ind["logMarketCap"]
fin = df.loc[df["TypeEnt"] == "Financial company"]
fin_values = fin["logMarketCap"]
scipy.stats.f_oneway(bank_values,ind_values,fin_values)
#Run Tukey test
res2 = pairwise_tukeyhsd(df["logMarketCap"],df["TypeEnt"])
print(res2)
#Plot tukey test
res2.plot_simultaneous(comparison_name=None,xlabel='diffs',ylabel='Group')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: < optional web data acquisition >
Step2: Simple models
Step3: Note that the shift() method effectively slides the index up (relative to the data), keeping the same index range but clipping or extending the data column as needed.
Step4: Note that the data column stayed fixed, but we adjusted each value in the index according to the freq kwarg while maintaining the period e.g. one month.
Step5: Ok, great, now that we can simply shift the existing data column; let's attach that our original frame as our first "forecast" series.
Step6: We made a forecast!
Step7: Now how do we assess our model?
Step8: Not so bad! Certainly better than a random number generator.
Step10: Looks pretty good! We'll come back to a more quantitative assessment of this prediction in a bit.
Step11: We can use this function to create a similar DataFrame to the one we used earlier. We'll loop over the observed data, and use the past data as our input.
Step13: NOW FOR MOAR MODELS!
Step15: Note that we aren't paying any attention to the observed resolution of the data, only the relative position of any cyclic behavior.
Step17: Mean method
Step19: Drift method
Step20: Model accuracy metrics
Step21: Another view on these charts to quantify the quality of a model is to look at the distribution of residuals.
Step22: They all look pretty good, but we can be more specific.
Step23: Feel free to experiment (or BYO GridSearchCV), but I think you'll typically find these are all >0.95, and the naive model frequently has the highest value.
Step24: Since MAE isn't normalized, it's a little hard to eyeball (big numbers), and to compare different models. There are also mean absolute, and median absolute errors, if you feel like you want to "penalize" outliers in any particular fashion.
Step25: Window functions
Step26: Exponentially weighted windows
|
<ASSISTANT_TASK:>
Python Code:
import copy
from IPython.display import Image
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
from sklearn.metrics import r2_score, mean_squared_error
%matplotlib inline
plt.rcParams["figure.figsize"] = (8,6)
# `unemploy.csv` is included in the repo - it's a small csv
! head misc/unemploy.csv
# get data from local dir
data = pd.read_csv('misc/unemploy.csv', parse_dates=True, index_col=0)
data.head()
# this is for the data we have locally
data_name = 'UNEMPLOY'
# what does this data look like?
data.plot()
# check the top of the dataset
data.head()
# what does the shift() method do?
# (remember that pandas methods return a new df)
data.shift().head()
# what happens at the end of the series?
data.tail()
# 'periods=1' is the default first arg
data.shift(periods=1).tail()
data.head()
# a timely reminder of bit.ly/pd-offsets
data.shift(freq='2D').head()
data.tail()
data.shift(freq='1M').tail()
# use a copy for our forecast frame
d = copy.deepcopy(data)
# assign the "forecast" to be the 1-period shifted version of the "observed" column
d['forecast'] = data.shift(1)[data_name]
d.head()
Image(filename='misc/kermit.png')
# how does our forecast look by eye?
d.plot()
plt.title('naive')
plt.scatter(d[data_name], d[data_name])
plt.xlabel('truth')
plt.ylabel('also truth')
plt.title('this will never happen');
plt.scatter(d[data_name], d['forecast'])
plt.xlabel('truth')
plt.ylabel('forecast')
plt.title("variance is a sign that you're alive");
def fc_naive(data, **kwargs):
    """The 'naive' forecast of the next point in `data` (presumed to be
    ordered in time) is equal to the last point observed in the series.

    `data` should be a 1-D numpy array (or sequence); `**kwargs` is
    ignored, accepted only so all fc_* functions share one signature.

    Returns a single-valued forecast for the next value in the series.
    Raises IndexError when `data` is empty (no observation to repeat).
    """
    # FIX: the original docstring lost its triple quotes, leaving bare
    # text lines that are a SyntaxError; restored as a proper docstring.
    forecast = data[-1]
    return forecast
# container for the forecast
forecasts = []
# loop over positions in the array
for idx in range(len(data[data_name])):
# subset the series from beginning to position idx
array_slice = data[data_name].iloc[:idx].values
if idx < 10:
# a view behind the scenes...
print('iteration {}, array_slice: {}'.format(idx, array_slice))
# make a forecast using that series
try:
forecasts.append( fc_naive(array_slice) )
except IndexError:
# the first position won't have a forecast value
forecasts.append(np.nan)
d = copy.deepcopy(data)
d['forecast'] = forecasts
d.head()
d.plot()
plt.title('same naive graph');
def fc_snaive(data, n=7, **kwargs):
    """The 'seasonal naive' forecast of the next point in `data` (presumed
    to be ordered in time) is equal to the point observed `n` points prior
    in the series.

    `data` should be a 1-D numpy array (or sequence); `n` an integer
    season length. Raises IndexError when fewer than `n` points exist.

    Returns a single-valued forecast for the next value in the series.
    """
    # FIX: restored the docstring's triple quotes (stripped in the
    # original, which made the body a SyntaxError).
    forecast = data[-n]
    return forecast
def forecast_series(observed, fc_func, **kwargs):
    """Returns a list of forecasted values (using `fc_func` and any
    `kwargs` like a window `n`) for each value in the input array
    `observed`.

    Each forecast uses only the data strictly before that position;
    positions with too little history (e.g. the first point) hold np.nan.
    """
    # FIX: restored the docstring's triple quotes (stripped in the
    # original, which made the body a SyntaxError).
    # container for the forecast
    forecasts = []
    for idx in range(len(observed)):
        # subset the series from beginning to position idx
        array_slice = observed[:idx]
        # make a forecast using that series
        try:
            forecasts.append(fc_func(array_slice, **kwargs))
        except IndexError:
            # not enough history yet to produce a forecast
            forecasts.append(np.nan)
    return forecasts
d = copy.deepcopy(data)
# our data is monthly, and i have a hunch about quarterly cycles, so let's use n=3 (3 months in a quarter)
forecasts = forecast_series(d[data_name].values, fc_snaive, n=3)
d['forecast'] = forecasts
d.head()
d.plot()
plt.title('seasonal naive (n=3)')
plt.scatter(d[data_name], d['forecast'])
plt.xlabel('truth')
plt.ylabel('forecast')
plt.title('seasonal naive method')
def fc_mean(data, n=3, **kwargs):
    """The 'mean' forecast of the next point in `data` (presumed to be
    ordered in time) is equal to the mean value of the most recent `n`
    observed points.

    `data` should be a 1-D numpy array (or sequence); `n` an integer
    window size. Returns np.nan until `n` points have been observed.

    Returns a single-valued forecast for the next value in the series.
    """
    # FIX: restored the docstring's triple quotes (stripped in the
    # original, which made the body a SyntaxError).
    # don't start averaging until we've seen n points
    if len(data[-n:]) < n:
        forecast = np.nan
    else:
        # nb: we'll keep the forecast as a float
        forecast = np.mean(data[-n:])
    return forecast
d = copy.deepcopy(data)
# let's try a 4-point rolling mean
forecasts = forecast_series(d[data_name].values, fc_mean, n=4)
d['forecast'] = forecasts
d.head()
d.plot()
plt.title('mean forecast (n=3)');
plt.scatter(d[data_name], d['forecast'])
plt.xlabel('truth')
plt.ylabel('forecast')
plt.title('mean method')
def fc_drift(data, n=3, **kwargs):
    """The 'drift' forecast of the next point in `data` (presumed to be
    ordered in time) is a linear extrapolation from `n` points ago,
    through the most recent point.

    `data` should be a 1-D numpy array (or sequence); `n` an integer
    span (must be >= 2 to define a slope). Raises IndexError when fewer
    than `n` points exist.

    Returns a single-valued forecast for the next value in the series.
    """
    # FIX: restored the docstring's triple quotes (stripped in the
    # original, which made the body a SyntaxError).
    yi = data[-n]
    yf = data[-1]
    slope = (yf - yi) / (n - 1)
    forecast = yf + slope
    return forecast
d = copy.deepcopy(data)
# let's try a 5-point drift method
forecasts = forecast_series(d[data_name].values, fc_drift, n=5)
d['forecast'] = forecasts
d.head()
d.plot()
plt.title('drift method');
plt.scatter(d[data_name], d['forecast'])
plt.xlabel('truth')
plt.ylabel('forecast')
d = copy.deepcopy(data)
# feel free to tweak the 'n' args
model_list = [('naive', fc_naive, 0),
('seasonal_naive', fc_snaive, 3),
('mean', fc_mean, 3),
('drift', fc_drift, 5)]
# create new cols for each model
for name, model, nn in model_list:
d[name] = forecast_series(d[data_name].values, model, n=nn)
d.head()
d.plot()
plt.title('ALL THE FORECASTS!');
for name, series_data in d.items():
plt.plot(d[data_name], series_data, 'o', alpha=0.6, label=name)
plt.xlabel('truth')
plt.ylabel('pred')
plt.title('another view')
plt.legend()
comparison = 'naive'
(d[data_name] - d[comparison]).hist(bins=30)
plt.xlabel('residuals')
plt.title('residual distribution for method: {}'.format(comparison));
print('* R2 scores (bigger = better) *\n')
# calculate R2 for each model (against the observed data)
for name, series_data in d.items():
# strip rows with nans
subdf = d[[data_name, name]].dropna()
truth = subdf[data_name].values
pred = subdf[name].values
# calculate metric
r2 = r2_score(truth, pred)
print('{} - {:.4f}'.format(name, r2))
print('* MAE scores (smaller = better) *\n')
# calculate MAE for each model (against the observed data)
for name, series_data in d.items():
# strip rows with nans
subdf = d[[data_name, name]].dropna()
truth = subdf[data_name].values
pred = subdf[name].values
# calculate metric
mae = mean_squared_error(truth, pred)
print('{} - {:.4f}'.format(name, mae))
# recall our original 'data' dataframe
data.head()
# make a "rolling" object that we can use for calculations
r = data.rolling(window=5)
# this object can be treated much like a GroupBy object
r
# we can apply a number of methods to the Rolling object, like standard numerical calcs
r.mean().head(10)
plt.plot(data, 'o--', label=data_name)
plt.plot(r.mean(), '.-', label='rolling mean')
plt.legend()
plt.plot(data, 'o--', label=data_name)
plt.plot(r.max(), '.-', label='rolling max')
plt.legend()
# calculate stdev on the rolling object within window size
stds = r.std()
# add the stdevs as error bars on each point
data.plot(style='o', yerr=stds)
plt.title('observed data points + windowed stdev')
plt.legend();
plt.plot(data, 'o-', label=data_name)
plt.plot(data.ewm(span=5).mean(), '.--', label='EMW')
plt.legend();
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Preprocessing
Step2: Regression over Time
|
<ASSISTANT_TASK:>
Python Code:
% matplotlib inline
import pandas as pd
from dateutil.relativedelta import relativedelta
import statsmodels.formula.api as sm
import requests
#import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
df_reg = pd.read_csv("../../data/retention/newcomer_sample_features.csv")
df_reg['timestamp'] = pd.to_datetime(df_reg['registration_day'], format='%Y-%m-%d')
df_reg['year'] = df_reg.timestamp.dt.year
YEARS = list(range(2006, 2016))
df_by_year = {}
for year in YEARS:
df_by_year[year] = df_reg.query('year == %s' % year)
def regress(df, formula, family = 'linear'):
    """Fit a statsmodels regression of `formula` on `df`.

    Args:
        df: pandas DataFrame holding the model variables.
        formula: patsy formula string, e.g. "y ~ x1 + x2".
        family: 'linear' for OLS or 'logistic' for logit.

    Returns:
        The fitted statsmodels results object.

    Raises:
        ValueError: if `family` is not a supported name.
    """
    # BUG FIX: the original passed the module-level variable `f` to
    # sm.ols/sm.logit, silently ignoring the `formula` argument.
    if family == 'linear':
        result = sm.ols(formula=formula, data=df).fit()
    elif family == 'logistic':
        result = sm.logit(formula=formula, data=df).fit(disp=0)
    else:
        # Previously printed "Wrong Family" then crashed on the unbound
        # `result`; fail loudly and clearly instead.
        raise ValueError("Wrong Family: %r" % family)
    return result
def get_coeffs_and_ci(df_dict, times, formula, family = 'linear', plot_coeff = 't1_harassment_received'):
    """For each key in `times`, fit `formula` on df_dict[t] and collect
    the coefficient plus 95% confidence-interval bounds for `plot_coeff`.

    Returns (coefficients, lower_bounds, upper_bounds), one entry per t.
    """
    coeffs, lows, highs = [], [], []
    for t in times:
        fit = regress(df_dict[t], formula, family)
        ci = fit.conf_int()
        coeffs.append(fit.params[plot_coeff])
        lows.append(ci.loc[plot_coeff, 0])
        highs.append(ci.loc[plot_coeff, 1])
    return coeffs, lows, highs
def plot_coeffs_over_time(times, reg_coeffs, lower_ci, upper_ci):
    """Errorbar plot of regression coefficients against time, with
    asymmetric bars spanning the supplied confidence-interval bounds."""
    below = np.subtract(reg_coeffs, lower_ci)
    above = np.subtract(upper_ci, reg_coeffs)
    plt.errorbar(times, reg_coeffs, [below, above])
    plt.xlim(times[0] - 1, times[-1] + 1)
    plt.ticklabel_format(useOffset=False)
def plot_formula(df_dict, times, formula, family = 'linear', plot_coeff = 't1_harassment_received'):
    """Fit `formula` per time slice and plot the chosen coefficient's
    trajectory with its confidence intervals."""
    plot_coeffs_over_time(
        times, *get_coeffs_and_ci(df_dict, times, formula, family, plot_coeff))
f = "t2_num_days_active ~ t1_num_days_active + t1_harassment_received"
plot_formula(df_by_year, YEARS, f)
df_gender = {}
for k in df_by_year:
df_gender[k] = df_by_year[k].query('has_gender == 1')
f="t2_num_days_active ~ t1_num_days_active + is_female"
plot_formula(df_gender, YEARS, f, plot_coeff='is_female')
f="t1_harassment_received ~ is_female"
plot_formula(df_gender, YEARS, f, plot_coeff='is_female')
f="t1_harassment_received ~ has_gender"
plot_formula(df_by_year, YEARS, f, family = 'logistic', plot_coeff='has_gender')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get the Data
Step2: The data set is presented in a dictionary form
Step3: We can grab information and arrays out of this dictionary to set up our data frame and understanding of the features
Step4: Set up DataFrame
Step5: Now let's actually check out the dataframe!
Step6: Exploratory Data Analysis
Step7: Train the Support Vector Classifier
Step8: Predictions and Evaluations
Step9: Woah! Notice that we are classifying everything into a single class! This means our model needs to have it parameters adjusted (it may also help to normalize the data).
Step10: One of the great things about GridSearchCV is that it is a meta-estimator. It takes an estimator like SVC, and creates a new estimator, that behaves exactly the same - in this case, like a classifier. You should add refit=True and choose verbose to whatever number you want, higher the number, the more verbose (verbose just means the text output describing the process).
Step11: What fit does is a bit more involved then usual. First, it runs the same loop with cross-validation, to find the best parameter combination. Once it has the best combination, it runs fit again on all data passed to fit (without cross-validation), to built a single new model using the best parameter setting.
Step12: You can inspect the best parameters found by GridSearchCV in the best_params_ attribute, and the best estimator in the best_estimator_ attribute
Step13: Then you can re-run predictions on this grid object just like you would with a normal model.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
cancer.keys()
print(cancer['DESCR'])
cancer['feature_names']
df_feat = pd.DataFrame(cancer['data'],columns=cancer['feature_names'])
df_feat.info()
cancer['target']
df_target = pd.DataFrame(cancer['target'],columns=['Cancer'])
# BUG FIX: the original called df.head(), but no `df` exists in this
# notebook (only df_feat and df_target) — a NameError at runtime.
# Show the frame that was just built.
df_target.head()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df_feat, np.ravel(df_target), test_size=0.30, random_state=101)
from sklearn.svm import SVC
model = SVC()
model.fit(X_train,y_train)
predictions = model.predict(X_test)
from sklearn.metrics import classification_report,confusion_matrix
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
param_grid = {'C': [0.1,1, 10, 100, 1000], 'gamma': [1,0.1,0.01,0.001,0.0001], 'kernel': ['rbf']}
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(SVC(),param_grid,refit=True,verbose=3)
# May take awhile!
grid.fit(X_train,y_train)
grid.best_params_
grid.best_estimator_
grid_predictions = grid.predict(X_test)
print(confusion_matrix(y_test,grid_predictions))
print(classification_report(y_test,grid_predictions))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: GPR Zero Offset App (Wave Regime)
Step2: Attenuation App
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
from geoscilabs.gpr.GPR_zero_offset import WidgetWaveRegime
from geoscilabs.gpr.Attenuation import AttenuationWidgetTBL
fc = 250*1e6  # radar centre frequency [Hz]
d = 6  # depth of interest [m]
# Wave speed in the medium: c / sqrt(relative permittivity), eps_r = 4 here.
v = 3*1e8 / np.sqrt(4)
# NOTE(review): sqrt(v*d / (2*fc)) = sqrt(lambda*d/2) — looks like a
# horizontal-resolution / Fresnel-zone estimate; confirm against the app docs.
np.sqrt(v*d / (2*fc))
WidgetWaveRegime()
AttenuationWidgetTBL()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the Google cloud-storage library as well.
Step2: Restart the Kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your GCP account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Vertex AI constants
Step12: Clients
Step13: Prepare a trainer script
Step14: Task.py contents
Step15: Store training script on your Cloud Storage bucket
Step16: Train a model
Step17: Example output
Step18: Response
Step19: Example output
Step20: projects.locations.customJobs.get
Step21: Response
Step22: Example output
Step23: Deploy the model
Step24: Example output
Step25: Response
Step26: Example output
Step27: Make batch predictions
Step28: Example output
Step29: Example output
Step30: Response
Step31: Example output
Step32: projects.locations.batchPredictionJobs.get
Step33: Response
Step35: Example output
Step36: Example output
Step37: Example output
Step38: Response
Step39: Example output
Step40: projects.locations.endpoints.deployModel
Step41: Example output
Step42: Response
Step43: Example output
Step44: projects.locations.endpoints.predict
Step45: Request
Step46: Example output
Step47: Response
Step48: Example output
Step49: Response
Step50: Example output
|
<ASSISTANT_TASK:>
Python Code:
! pip3 install -U google-cloud-aiplatform --user
! pip3 install google-cloud-storage
import os
if not os.getenv("AUTORUN"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your Google Cloud account. This provides access
# to your Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Vertex, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this tutorial in a notebook locally, replace the string
# below with the path to your service account key and run this cell to
# authenticate your Google Cloud account.
else:
%env GOOGLE_APPLICATION_CREDENTIALS your_path_to_credentials.json
# Log in to your account on Google Cloud
! gcloud auth login
BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "[your-bucket-name]":
BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION gs://$BUCKET_NAME
! gsutil ls -al gs://$BUCKET_NAME
import os
import sys
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
# API Endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex AI location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_model_client():
    """Return a Vertex AI ModelServiceClient bound to the regional API endpoint."""
    return aip.ModelServiceClient(client_options=client_options)
def create_endpoint_client():
    """Return a Vertex AI EndpointServiceClient bound to the regional API endpoint."""
    return aip.EndpointServiceClient(client_options=client_options)
def create_prediction_client():
    """Return a Vertex AI PredictionServiceClient bound to the regional API endpoint."""
    return aip.PredictionServiceClient(client_options=client_options)
def create_job_client():
    """Return a Vertex AI JobServiceClient bound to the regional API endpoint."""
    return aip.JobServiceClient(client_options=client_options)
clients = {}
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
clients["job"] = create_job_client()
for client in clients.items():
print(client)
# Make folder for python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\
tag_build =\n\
tag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\
setuptools.setup(\n\
install_requires=[\n\
],\n\
packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\
Name: Custom Census Income\n\
Version: 0.0.0\n\
Summary: Demonstration training script\n\
Home-page: www.google.com\n\
Author: Google\n\
Author-email: aferlitsch@google.com\n\
License: Public\n\
Description: Demo\n\
Platform: Vertex AI"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
%%writefile custom/trainer/task.py
# Single Instance Training for Census Income
from sklearn.ensemble import RandomForestClassifier
import joblib
from sklearn.feature_selection import SelectKBest
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelBinarizer
import datetime
import pandas as pd
from google.cloud import storage
import numpy as np
import argparse
import os
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
# Public bucket holding the census data
bucket = storage.Client().bucket('cloud-samples-data')
# Path to the data inside the public bucket
blob = bucket.blob('ai-platform/sklearn/census_data/adult.data')
# Download the data
blob.download_to_filename('adult.data')
# Define the format of your input data including unused columns (These are the columns from the census data files)
COLUMNS = (
'age',
'workclass',
'fnlwgt',
'education',
'education-num',
'marital-status',
'occupation',
'relationship',
'race',
'sex',
'capital-gain',
'capital-loss',
'hours-per-week',
'native-country',
'income-level'
)
# Categorical columns are columns that need to be turned into a numerical value to be used by scikit-learn
CATEGORICAL_COLUMNS = (
'workclass',
'education',
'marital-status',
'occupation',
'relationship',
'race',
'sex',
'native-country'
)
# Load the training census dataset
with open('./adult.data', 'r') as train_data:
raw_training_data = pd.read_csv(train_data, header=None, names=COLUMNS)
# Remove the column we are trying to predict ('income-level') from our features list
# Convert the Dataframe to a lists of lists
train_features = raw_training_data.drop('income-level', axis=1).values.tolist()
# Create our training labels list, convert the Dataframe to a lists of lists
train_labels = (raw_training_data['income-level'] == ' >50K').values.tolist()
# Since the census data set has categorical features, we need to convert
# them to numerical values. We'll use a list of pipelines to convert each
# categorical column and then use FeatureUnion to combine them before calling
# the RandomForestClassifier.
categorical_pipelines = []
# Each categorical column needs to be extracted individually and converted to a numerical value.
# To do this, each categorical column will use a pipeline that extracts one feature column via
# SelectKBest(k=1) and a LabelBinarizer() to convert the categorical value to a numerical one.
# A scores array (created below) will select and extract the feature column. The scores array is
# created by iterating over the COLUMNS and checking if it is a CATEGORICAL_COLUMN.
for i, col in enumerate(COLUMNS[:-1]):
if col in CATEGORICAL_COLUMNS:
# Create a scores array to get the individual categorical column.
# Example:
# data = [39, 'State-gov', 77516, 'Bachelors', 13, 'Never-married', 'Adm-clerical',
# 'Not-in-family', 'White', 'Male', 2174, 0, 40, 'United-States']
# scores = [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
#
# Returns: [['State-gov']]
# Build the scores array.
scores = [0] * len(COLUMNS[:-1])
# This column is the categorical column we want to extract.
scores[i] = 1
skb = SelectKBest(k=1)
skb.scores_ = scores
# Convert the categorical column to a numerical value
lbn = LabelBinarizer()
r = skb.transform(train_features)
lbn.fit(r)
# Create the pipeline to extract the categorical feature
categorical_pipelines.append(
('categorical-{}'.format(i), Pipeline([
('SKB-{}'.format(i), skb),
('LBN-{}'.format(i), lbn)])))
# Create pipeline to extract the numerical features
skb = SelectKBest(k=6)
# From COLUMNS use the features that are numerical
skb.scores_ = [1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0]
categorical_pipelines.append(('numerical', skb))
# Combine all the features using FeatureUnion
preprocess = FeatureUnion(categorical_pipelines)
# Create the classifier
classifier = RandomForestClassifier()
# Transform the features and fit them to the classifier
classifier.fit(preprocess.transform(train_features), train_labels)
# Create the overall model as a single pipeline
pipeline = Pipeline([
('union', preprocess),
('classifier', classifier)
])
# Split path into bucket and subdirectory
bucket = args.model_dir.split('/')[2]
subdir = args.model_dir.split('/')[-1]
# Write model to a local file
joblib.dump(pipeline, 'model.joblib')
# Upload the model to GCS
bucket = storage.Client().bucket(bucket)
blob = bucket.blob(subdir + '/model.joblib')
blob.upload_from_filename('model.joblib')
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz gs://$BUCKET_NAME/census.tar.gz
TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/scikit-learn-cpu.0-23:latest"
JOB_NAME = "custom_job_SKL" + TIMESTAMP
WORKER_POOL_SPEC = [
{
"replica_count": 1,
"machine_spec": {"machine_type": "n1-standard-4"},
"python_package_spec": {
"executor_image_uri": TRAIN_IMAGE,
"package_uris": ["gs://" + BUCKET_NAME + "/census.tar.gz"],
"python_module": "trainer.task",
"args": ["--model-dir=" + "gs://{}/{}".format(BUCKET_NAME, JOB_NAME)],
},
}
]
training_job = aip.CustomJob(
display_name=JOB_NAME, job_spec={"worker_pool_specs": WORKER_POOL_SPEC}
)
print(
MessageToJson(
aip.CreateCustomJobRequest(parent=PARENT, custom_job=training_job).__dict__[
"_pb"
]
)
)
request = clients["job"].create_custom_job(parent=PARENT, custom_job=training_job)
print(MessageToJson(request.__dict__["_pb"]))
# The full unique ID for the custom training job
custom_training_id = request.name
# The short numeric ID for the custom training job
custom_training_short_id = custom_training_id.split("/")[-1]
print(custom_training_id)
request = clients["job"].get_custom_job(name=custom_training_id)
print(MessageToJson(request.__dict__["_pb"]))
while True:
response = clients["job"].get_custom_job(name=custom_training_id)
if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED:
print("Training job has not completed:", response.state)
if response.state == aip.PipelineState.PIPELINE_STATE_FAILED:
break
else:
print("Training Time:", response.end_time - response.start_time)
break
time.sleep(60)
# model artifact output directory on Google Cloud Storage
model_artifact_dir = (
response.job_spec.worker_pool_specs[0].python_package_spec.args[0].split("=")[-1]
)
print("artifact location " + model_artifact_dir)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-23:latest"
model = {
"display_name": "custom_job_SKL" + TIMESTAMP,
"artifact_uri": model_artifact_dir,
"container_spec": {"image_uri": DEPLOY_IMAGE, "ports": [{"container_port": 8080}]},
}
print(MessageToJson(aip.UploadModelRequest(parent=PARENT, model=model).__dict__["_pb"]))
request = clients["model"].upload_model(parent=PARENT, model=model)
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
model_id = result.model
import json
import tensorflow as tf
INSTANCES = [
[
25,
"Private",
226802,
"11th",
7,
"Never-married",
"Machine-op-inspct",
"Own-child",
"Black",
"Male",
0,
0,
40,
"United-States",
],
[
38,
"Private",
89814,
"HS-grad",
9,
"Married-civ-spouse",
"Farming-fishing",
"Husband",
"White",
"Male",
0,
0,
50,
"United-States",
],
[
28,
"Local-gov",
336951,
"Assoc-acdm",
12,
"Married-civ-spouse",
"Protective-serv",
"Husband",
"White",
"Male",
0,
0,
40,
"United-States",
],
[
44,
"Private",
160323,
"Some-college",
10,
"Married-civ-spouse",
"Machine-op-inspct",
"Husband",
"Black",
"Male",
7688,
0,
40,
"United-States",
],
[
18,
"?",
103497,
"Some-college",
10,
"Never-married",
"?",
"Own-child",
"White",
"Female",
0,
0,
30,
"United-States",
],
[
34,
"Private",
198693,
"10th",
6,
"Never-married",
"Other-service",
"Not-in-family",
"White",
"Male",
0,
0,
30,
"United-States",
],
[
29,
"?",
227026,
"HS-grad",
9,
"Never-married",
"?",
"Unmarried",
"Black",
"Male",
0,
0,
40,
"United-States",
],
[
63,
"Self-emp-not-inc",
104626,
"Prof-school",
15,
"Married-civ-spouse",
"Prof-specialty",
"Husband",
"White",
"Male",
3103,
0,
32,
"United-States",
],
[
24,
"Private",
369667,
"Some-college",
10,
"Never-married",
"Other-service",
"Unmarried",
"White",
"Female",
0,
0,
40,
"United-States",
],
[
55,
"Private",
104996,
"7th-8th",
4,
"Married-civ-spouse",
"Craft-repair",
"Husband",
"White",
"Male",
0,
0,
10,
"United-States",
],
]
gcs_input_uri = "gs://" + BUCKET_NAME + "/" + "test.jsonl"
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
for i in INSTANCES:
f.write(json.dumps(i) + "\n")
! gsutil cat $gcs_input_uri
model_parameters = Value(
struct_value=Struct(
fields={
"confidence_threshold": Value(number_value=0.5),
"max_predictions": Value(number_value=10000.0),
}
)
)
batch_prediction_job = {
"display_name": "custom_job_SKL" + TIMESTAMP,
"model": model_id,
"input_config": {
"instances_format": "jsonl",
"gcs_source": {"uris": [gcs_input_uri]},
},
"model_parameters": model_parameters,
"output_config": {
"predictions_format": "jsonl",
"gcs_destination": {
"output_uri_prefix": "gs://" + f"{BUCKET_NAME}/batch_output/"
},
},
"dedicated_resources": {
"machine_spec": {"machine_type": "n1-standard-2"},
"starting_replica_count": 1,
"max_replica_count": 1,
},
}
print(
MessageToJson(
aip.CreateBatchPredictionJobRequest(
parent=PARENT, batch_prediction_job=batch_prediction_job
).__dict__["_pb"]
)
)
request = clients["job"].create_batch_prediction_job(
parent=PARENT, batch_prediction_job=batch_prediction_job
)
print(MessageToJson(request.__dict__["_pb"]))
# The fully qualified ID for the batch job
batch_job_id = request.name
# The short numeric ID for the batch job
batch_job_short_id = batch_job_id.split("/")[-1]
print(batch_job_id)
request = clients["job"].get_batch_prediction_job(name=batch_job_id)
print(MessageToJson(request.__dict__["_pb"]))
def get_latest_predictions(gcs_out_dir):
Get the latest prediction subfolder using the timestamp in the subfolder name
folders = !gsutil ls $gcs_out_dir
latest = ""
for folder in folders:
subfolder = folder.split("/")[-2]
if subfolder.startswith("prediction-"):
if subfolder > latest:
latest = folder[:-1]
return latest
while True:
response = clients["job"].get_batch_prediction_job(name=batch_job_id)
if response.state != aip.JobState.JOB_STATE_SUCCEEDED:
print("The job has not completed:", response.state)
if response.state == aip.JobState.JOB_STATE_FAILED:
break
else:
folder = get_latest_predictions(
response.output_config.gcs_destination.output_uri_prefix
)
! gsutil ls $folder/prediction*
! gsutil cat -h $folder/prediction*
break
time.sleep(60)
endpoint = {"display_name": "custom_job_SKL" + TIMESTAMP}
print(
MessageToJson(
aip.CreateEndpointRequest(parent=PARENT, endpoint=endpoint).__dict__["_pb"]
)
)
request = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint)
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
deployed_model = {
"model": model_id,
"display_name": "custom_job_SKL" + TIMESTAMP,
"dedicated_resources": {
"min_replica_count": 1,
"max_replica_count": 1,
"machine_spec": {"machine_type": "n1-standard-4", "accelerator_count": 0},
},
}
print(
MessageToJson(
aip.DeployModelRequest(
endpoint=endpoint_id,
deployed_model=deployed_model,
traffic_split={"0": 100},
).__dict__["_pb"]
)
)
request = clients["endpoint"].deploy_model(
endpoint=endpoint_id, deployed_model=deployed_model, traffic_split={"0": 100}
)
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
# The unique ID for the deployed model
deployed_model_id = result.deployed_model.id
print(deployed_model_id)
INSTANCES = [
[
25,
"Private",
226802,
"11th",
7,
"Never-married",
"Machine-op-inspct",
"Own-child",
"Black",
"Male",
0,
0,
40,
"United-States",
],
[
38,
"Private",
89814,
"HS-grad",
9,
"Married-civ-spouse",
"Farming-fishing",
"Husband",
"White",
"Male",
0,
0,
50,
"United-States",
],
[
28,
"Local-gov",
336951,
"Assoc-acdm",
12,
"Married-civ-spouse",
"Protective-serv",
"Husband",
"White",
"Male",
0,
0,
40,
"United-States",
],
[
44,
"Private",
160323,
"Some-college",
10,
"Married-civ-spouse",
"Machine-op-inspct",
"Husband",
"Black",
"Male",
7688,
0,
40,
"United-States",
],
[
18,
"?",
103497,
"Some-college",
10,
"Never-married",
"?",
"Own-child",
"White",
"Female",
0,
0,
30,
"United-States",
],
[
34,
"Private",
198693,
"10th",
6,
"Never-married",
"Other-service",
"Not-in-family",
"White",
"Male",
0,
0,
30,
"United-States",
],
[
29,
"?",
227026,
"HS-grad",
9,
"Never-married",
"?",
"Unmarried",
"Black",
"Male",
0,
0,
40,
"United-States",
],
[
63,
"Self-emp-not-inc",
104626,
"Prof-school",
15,
"Married-civ-spouse",
"Prof-specialty",
"Husband",
"White",
"Male",
3103,
0,
32,
"United-States",
],
[
24,
"Private",
369667,
"Some-college",
10,
"Never-married",
"Other-service",
"Unmarried",
"White",
"Female",
0,
0,
40,
"United-States",
],
[
55,
"Private",
104996,
"7th-8th",
4,
"Married-civ-spouse",
"Craft-repair",
"Husband",
"White",
"Male",
0,
0,
10,
"United-States",
],
]
prediction_request = aip.PredictRequest(endpoint=endpoint_id)
prediction_request.instances.append(INSTANCES)
print(MessageToJson(prediction_request.__dict__["_pb"]))
request = clients["prediction"].predict(endpoint=endpoint_id, instances=INSTANCES)
print(MessageToJson(request.__dict__["_pb"]))
request = clients["endpoint"].undeploy_model(
endpoint=endpoint_id, deployed_model_id=deployed_model_id, traffic_split={}
)
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
delete_model = True
delete_endpoint = True
delete_pipeline = True
delete_batchjob = True
delete_bucket = True
# Delete the model using the Vertex AI fully qualified identifier for the model
try:
if delete_model:
clients["model"].delete_model(name=model_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex AI fully qualified identifier for the endpoint
try:
if delete_endpoint:
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the custom training job using the Vertex AI fully qualified identifier for the custom training job
try:
if custom_training_id:
clients["job"].delete_custom_job(name=custom_training_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex AI fully qualified identifier for the batch job
try:
if delete_batchjob:
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r gs://$BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Sources
Step2: Note that the four major sub-components of spending (Defense, Medicare, Nondefense Discretionary, and Social Security) are 'stacked' on this plot, showing their total relative to the total government spending.
Step3: Data 2 is GDP and GDP per capita data from the World Bank. This will be added to df1 to show which countries spend the most on defense as a fraction of their GDP, but first the country names have to be converted to 3-character ISO codes to match the WB dataset. The pycountry module is used to automate the conversion between country names and ISO codes.
Step4: Data 2 is GDP and GDP per capita data from the World Bank. Now that the aforementioned military spending data is organized by ISO country codes, the wbdata python module can easily retrieve the GDP data for the countries we have defense data for. This is used to compare U.S. defense spending to other countries.
Step5: Not surprisingly, US is dominant in 2014 GDP spending (constant 2005 USD).
Step6: Also not surprisingly, when using GDP per capita the difference isn't so clear. Scaling defense spending by GDP per capita should be an effective screen to see how much each US citizen is effectively spending on defense compared to citizens of other countries.
|
<ASSISTANT_TASK:>
Python Code:
## This cell imports various packages which are needed for the code to run
import pandas as pd # pandas is used to organize the data
import matplotlib.pyplot as plt # Intro/basic graphics
import seaborn as seab # Better graphics
import pandas.io.data as dr # for data 0 from FRED
import datetime # for data 0 from FRED
import wbdata as wb # access World Bank GDP data. To install, type 'pip install wbdata' in command line
# import pycountry # reference only - module is imported in the same cell it is used to reduce confusion
# keeps plots in-line
%matplotlib inline
import sys
## Data 0 - USG Spending Breakdown
## Data retrieved using pandas tool from FRED (Federal Reserve Bank of St. Louis)
## http://pandas.pydata.org/pandas-docs/stable/remote_data.html
start=datetime.datetime(1995,1,1)
end=datetime.datetime(2015,12,31)
#defSpend=dr.DataReader('FDEFX','fred',start,end)
df0=dr.DataReader('FDEFX','fred',start,end)
df0a=dr.DataReader('FGEXPND','fred',start,end) #FGEXPND
df0a.columns=['Total Expenses']
df0['Medicare']=dr.DataReader('W824RC1','fred',start,end)
df0['Nondefense Disc.']=dr.DataReader('FNDEFX','fred',start,end)
df0['Social Security']=dr.DataReader('W823RC1','fred',start,end)
df0.columns=['Defense','Medicare','Nondefense Disc.','Social Security']
# Plot Data 0 with the four sub-components stacked to show how they compare to total spending.
seab.set_style('dark')
fig, ax = plt.subplots(figsize=(10,7))
df0.plot(ax=ax,stacked=True)
df0a.plot(ax=ax,stacked=False)
# ax.fill_between(df0['Defense'],df0['Medicare'],color='g',interpolate=True)
# Note: I had hoped to use fill_between for the df0 data to provide a better visual 'feel' for what fraction of the total
# each expenses is. In the end I was not able to figure it out.
ax.set_xlabel('Year')
ax.set_ylabel('Spending, Billions of USD')
ax.set_title('Figure 0, Breakdown of Major Government Expenses, 1995-2015')
## Data 1 - SIPRI Military Spending data
# File location:='C:\Users\John\Desktop\Data Bootcamp\Project\SIPRI2014.xlsx'
# Excel file to be included with submission. The data had to be modified to make country names exactly match ISO definitions
# in pycountry module, see next cell.
df1=pd.read_excel('SIPRI2014.xlsx')
df1=df1.set_index(['MILEX2014'])
df1=df1.transpose()
# This cell changes country names to 3-letter ISO codes.
# note: if pycountry is not already installed on the machine, open command prompt and type 'pip install pycountry'
import pycountry
# get list of countries from SIPRI data in the same order
countryList=list(df1.columns)
# this FOR loop is adapted from the pycountry documentation
input_countries = countryList
countriesISO = {}
for country in pycountry.countries:
countriesISO[country.name] = country.alpha3 # alpha3 refers to standard 3-letter abbreviation
# this line sets 'Unknown Code' to the countries where the SIPRI name doesn't match pycountry. Had to fix this manually.
isocodes = [countriesISO.get(country, 'Unknown code') for country in input_countries]
# Print the list to see which names do not match up. Various reasons why this could happen - e.g. 'Dominican Rep.' instead of
# 'Dominican Republic'. Some countries in the SIPRI set aren't recognized in the pycountry module (e.g. North/South Sudan), these
# were generally fixed manually to match or removed. South Sudan doesn't have great data for defense spending.
# add ISO codes to SIPRI data df1 so that GDP data (Data 2, see below) can be easily pulled
df1iso=df1
df1iso.columns=isocodes # Replace full country names (series labels) with 3-character ISO codes
df1iso.head()
## Data 2 - GDP Data ref http://blogs.worldbank.org/opendata/accessing-world-bank-data-apis-python-r-ruby-stata
## Accessed using wbdata python module and added to SIPRI df1. This is illustrative, it could also be done using DataReader.
## The updated dataframe is re-identified as df2 for clarity.
import datetime
df2=df1iso #retrieve the dataframe from previous cell with the ISO codes as variables and defense spending 1988-2015
# A list to tell wb which countries to look for. this creates a list of each country's ISO, still in the proper order
wbcountries=list(df2.columns)
# A dictionary to tell wbdata which data I want (GDP and GDP per capita)
wbindicator1 = {'NY.GDP.MKTP.KD':'GDP 2005 USD'}
wbindicator2 = {'NY.GDP.PCAP.KD':'GDP per capita 2005 USD'}
wbindicators = {'NY.GDP.MKTP.KD':'GDP 2005 USD','NY.GDP.PCAP.KD':'GDP per capita 2005 USD'}
df2_gdp=wb.get_dataframe(wbindicator1,country=wbcountries,convert_date=False)
df2_gdppc=wb.get_dataframe(wbindicator2,country=wbcountries,convert_date=False)
df2_gdp = df2_gdp.unstack().transpose() # fixes formatting
df2_gdppc = df2_gdppc.unstack().transpose()
df2_gdp=df2_gdp.drop(df2_gdp.index[0:28]) # quick and dirty way to get the GDP dates to match the SIPRI data
df2_gdppc=df2_gdppc.drop(df2_gdppc.index[0:28])
df2_gdp.head()
df2_gdppc.head()
fig, ax = plt.subplots(figsize=(10,7))
df2_gdp.plot(ax=ax,legend=False)
#df2_gdppc.plot(ax=ax,legend=False)
ax.set_xlabel('Year')
ax.set_ylabel('GDP, 2005 USD')
ax.set_title('GDP of Countries in SIPRI Study')
fig, ax = plt.subplots(figsize=(10,7))
df2_gdppc.plot(ax=ax,legend=False)
ax.set_xlabel('Year')
ax.set_ylabel('GDP per capita, 2005 USD')
ax.set_title('GDP per capita of Countries in SIPRI Study')
# 'Air gap' in code to avoid breaking Data 2 cells
df3a=df1iso #defense spending per year, ISO names
df3b=df2_gdp #gdp, 2005 USD, full country names
df3c=df2_gdppc #gdppc, 2005 USD, full country names
# Want function:
# Eliminate all years except 2015 from GDP, GDPPC, and DEF data
# df2_gdp=df2_gdp.drop(df2_gdp.index[0:28]) # quick and dirty way to get the GDP dates to match the SIPRI data
# set up the following as df4
# DEF DEF/GDP LAT LON ISO
# Country X X/Y 1 1 ALB
# Albania
# Japan
# Separates 2015 Defense spending from the rest of the data (other years), retains ISO codes & ordering
df3aa=df3a.drop(df3a.index[0:27])
df3aa=df3aa.drop(df3aa.index[1])
df3aa=df3aa.T
df3aa.columns=['2015DEF']
#Separates GDP for 2014, in 2005 USD
wbcountries=list(df2.columns)
del wbcountries[-1]
df3b.columns=wbcountries
df3bbb=df3b
df3bbb=df3bbb.drop(df3bbb.index[0:26])
df3bbb=df3bbb.drop(df3bbb.index[1])
df3bbb=df3bbb.T
# Data for GDP per capita, 2014, in 2005 USD
wbcountries=list(df2.columns) # get ISO codes back
del wbcountries[-1]
df3c.columns=wbcountries # convert column names from full names to ISO
df3ccc=df3c
df3ccc=df3ccc.drop(df3ccc.index[0:26]) # drop all but 2014
df3ccc=df3ccc.drop(df3ccc.index[1]) # drop all but 2014
df3ccc=df3ccc.T # fulfill want function, 2014 GDPPC as column
# Df4 builds the dataframe described in the 'want function'. This can easily be plotted as a bar graph, but
# with 160+ countries it's very difficult to get anything from it at a glance.
df4=df3aa
df4['GDP2005']=df3bbb
df4['GDPPC 2005']=df3ccc
df4.head(25)
# This cell shows the horizontal bar graph mentioned above. Warning, they're huge.
# fig, ax = plt.subplots(figsize=(10,250))
# df4['GDP2005'].plot(ax=ax,kind='barh')
# df4['GDPPC 2015'].plot(ax=ax,kind='barh')
# This cell is intended to perform operations on the columns, which would be plotted.
# Defense spending / GDP
# Defense spending / GDP per capita
# Then plot and look for outliers. USA spending will be highest by a mile based only on GDP, but will come back
# to Earth when it's compared on a GDP per capita basis.
#df4.head(25)
#df4['DEF div GDP']=df4['2015DEF']/df4['GDP2005']
#df4['DEF div GDPPC']=df4['2015DEF']/df4['GDPPC 2005']
#df4['DEF div by GDP']=df4['2015DEF'].div(df4['GDP2005'],axis='index')
# Scatter plot, Defense spending vs. GDP
# I have no idea why this doesn't work. It says that the series names
# are not in the index, but they're not supposed to be in the index.
#fig, ax = plt.subplots(figsize=(10,7))
#x=df4['2015DEF'].dropna()
#y=df4['GDP2005'].dropna()
#seab.lmplot(df4['2015DEF'],df4['GDP2005'],data=df4,fit_reg=True)
#seab.lmplot('x','y',data=df4,fit_reg=True)
#ax.set_xlabel('2015 Defense Spending')
#ax.set_ylabel('GDP Year 2014, constant 2005 USD')
#ax.set_title('Defense spending vs. GDP for Countries in SIPRI Study')
# Scatter plot, Defense spending vs. GDP per capita
# I have no idea why this doesn't work. It says that the series names
# are not in the index, but they're not supposed to be in the index.
#fig, ax = plt.subplots(figsize=(10,7))
#df4.plot(x='2015DEF', y='GDPPC 2005', ax=ax[0], kind="scatter",color="blue")
#ax.set_xlabel('2015 Defense Spending')
#ax.set_ylabel('GDP per capita 2014, constant 2005 USD')
#ax.set_title('Defense spending vs. GDP for Countries in SIPRI Study')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import libraries
Step2: Configure GCP environment settings
Step3: Authenticate your GCP account
Step4: Create the embedding lookup model
Step5: Create the model and export the SavedModel file
Step6: Inspect the exported SavedModel using the saved_model_cli command line tool
Step7: Test the SavedModel file
|
<ASSISTANT_TASK:>
Python Code:
!pip install -q -U pip
!pip install -q tensorflow==2.2.0
!pip install -q -U google-auth google-api-python-client google-api-core
import os
import numpy as np
import tensorflow as tf
print(f"Tensorflow version: {tf.__version__}")
PROJECT_ID = "yourProject" # Change to your project.
BUCKET = "yourBucketName" # Change to the bucket you created.
EMBEDDING_FILES_PATH = f"gs://{BUCKET}/bqml/item_embeddings/embeddings-*"
MODEL_OUTPUT_DIR = f"gs://{BUCKET}/bqml/embedding_lookup_model"
!gcloud config set project $PROJECT_ID
try:
from google.colab import auth
auth.authenticate_user()
print("Colab user is authenticated.")
except:
pass
if tf.io.gfile.exists(MODEL_OUTPUT_DIR):
print("Removing {} contents...".format(MODEL_OUTPUT_DIR))
tf.io.gfile.rmtree(MODEL_OUTPUT_DIR)
from embeddings_lookup import lookup_creator
lookup_creator.export_saved_model(EMBEDDING_FILES_PATH, MODEL_OUTPUT_DIR)
!saved_model_cli show --dir {MODEL_OUTPUT_DIR} --tag_set serve --signature_def serving_default
loaded_model = tf.saved_model.load(MODEL_OUTPUT_DIR)
input_items = ["2114406", "2114402 2120788", "abc123"]
output = loaded_model(input_items)
print(f"Embeddings retrieved: {output.shape}")
for idx, embedding in enumerate(output):
print(f"{input_items[idx]}: {embedding[:5]}")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we can grab a list of the GTEx tissue names
Step2: Take a peek at the tissue names we get to make sure they're what we expect
Step3: We can start with the process for the eQTL tables since they are smaller and a bit easier to work with. There are pretty much three steps here
Step6: To ensure that everything is joined correctly later on, we add both the _gene_id and tss_distance fields to the table keys here.
Step7: Now we can read in each individual MatrixTable and add it to the list we will pass to multi_way_union_cols.
Step8: Repartition unioned MatrixTable
Step9: I tried reading in the MatrixTable with _n_partitions=1000 to see how our partitions would look, but we still had a few that were much larger than the rest. So after this I ended up doing using repartition with a full shuffle, and it balanced things out.
Step10: And now we have a single MatrixTable for the GTEx eQTL data.
|
<ASSISTANT_TASK:>
Python Code:
import subprocess
import hail as hl
hl.init()
list_tissues = subprocess.run(["gsutil", "-u", "broad-ctsa", "ls",
"gs://hail-datasets-tmp/GTEx/GTEx_Analysis_v8_QTLs/GTEx_Analysis_v8_eQTL_all_associations"],
stdout=subprocess.PIPE)
tissue_files = list_tissues.stdout.decode("utf-8").split()
tissue_names = [x.split("/")[-1].split(".")[0] for x in tissue_files]
tissue_names[0:5]
for tissue_name in tissue_names:
print(f"eQTL: {tissue_name}")
ht = hl.read_table(f"gs://hail-datasets-us/GTEx_eQTL_allpairs_{tissue_name}_v8_GRCh38.ht", _n_partitions=64)
ht = ht.annotate(_gene_id = ht.gene_id, _tss_distance = ht.tss_distance)
ht = ht.drop("variant_id", "metadata")
ht = ht.key_by("locus", "alleles", "_gene_id", "_tss_distance")
ht = ht.annotate(**{tissue_name: ht.row_value.drop("gene_id", "tss_distance")})
ht = ht.select(tissue_name)
mt = ht.to_matrix_table_row_major(columns=[tissue_name], col_field_name="tissue")
mt = mt.checkpoint(
f"gs://hail-datasets-tmp/GTEx/eQTL_MatrixTables/GTEx_eQTL_all_snp_gene_associations_{tissue_name}_v8_GRCh38.mt",
overwrite=False,
_read_if_exists=True
)
from typing import List
def multi_way_union_cols(mts: List[hl.MatrixTable], column_keys: List[str]) -> hl.MatrixTable:
    """Union the columns of several row-keyed MatrixTables into one MatrixTable.

    Each input MatrixTable carries the entries for a single tissue; the result
    has one column per tissue and the union of all row keys.  Rows absent from
    a given input are padded with missing entry structs for that column.

    NOTE: the explanatory text inside this function had lost its comment
    markers in the source and was a syntax error; it is restored as comments.
    """
    # Schema of a missing entry, used to pad tissues with no data at a row key.
    missing_struct = "struct{ma_samples: int32, ma_count: int32, maf: float64, pval_nominal: float64, slope: float64, slope_se: float64}"
    # Turn each MatrixTable into a Table with its entries/columns as fields.
    mts = [mt._localize_entries("_mt_entries", "_mt_cols") for mt in mts]
    joined = hl.Table.multi_way_zip_join(mts, "_t_entries", "_t_cols")
    # Track which inputs are missing at each row so they can be padded below.
    joined = joined.annotate(_t_entries_missing = joined._t_entries.map(lambda x: hl.is_missing(x)))
    # Non-entry row fields are taken from the first input.
    rows = [(r, joined._t_entries.map(lambda x: x[r])[0])
            for r in joined._t_entries.dtype.element_type.fields
            if r != "_mt_entries"]
    # Need to provide a dummy array<struct> for tissues that are not present, to
    # make sure missing elements are not dropped from the flattened array.
    # Otherwise we get a HailException: length mismatch between entry array and
    # column array in 'to_matrix_table_row_major'.
    entries = [("_t_entries_flatten",
                hl.flatten(
                    joined._t_entries.map(
                        lambda x: hl.if_else(
                            hl.is_defined(x),
                            x._mt_entries,
                            hl.array([
                                hl.struct(
                                    ma_samples = hl.missing(hl.tint32),
                                    ma_count = hl.missing(hl.tint32),
                                    maf = hl.missing(hl.tfloat64),
                                    pval_nominal = hl.missing(hl.tfloat64),
                                    slope = hl.missing(hl.tfloat64),
                                    slope_se = hl.missing(hl.tfloat64)
                                )
                            ])
                        )
                    )
                )
               )]
    joined = joined.annotate(**dict(rows + entries))
    # Also make sure that a missing entry is replaced with a missing struct of
    # the same form at the same index in the array.
    joined = joined.annotate(_t_entries_new = hl.zip(joined._t_entries_missing,
                                                     joined._t_entries_flatten,
                                                     fill_missing=False))
    joined = joined.annotate(
        _t_entries_new = joined._t_entries_new.map(
            lambda x: hl.if_else(x[0] == True, hl.missing(missing_struct), x[1])
        )
    )
    # Concatenate the column metadata of all inputs into a single column array.
    joined = joined.annotate_globals(_t_cols = hl.flatten(joined._t_cols.map(lambda x: x._mt_cols)))
    joined = joined.drop("_t_entries", "_t_entries_missing", "_t_entries_flatten")
    mt = joined._unlocalize_entries("_t_entries_new", "_t_cols", ["tissue"])
    return mt
# Get list of file paths for individual eQTL MatrixTables
list_eqtl_mts = subprocess.run(["gsutil", "-u", "broad-ctsa", "ls", "gs://hail-datasets-tmp/GTEx/eQTL_MatrixTables"],
stdout=subprocess.PIPE)
eqtl_mts = list_eqtl_mts.stdout.decode("utf-8").split()
# Load MatrixTables for each tissue type to store in list for MWUC
mts_list = []
for eqtl_mt in eqtl_mts:
tissue_name = eqtl_mt.replace("gs://hail-datasets-tmp/GTEx/eQTL_MatrixTables/GTEx_eQTL_all_snp_gene_associations_", "")
tissue_name = tissue_name.replace("_v8_GRCh38.mt/", "")
print(tissue_name)
mt = hl.read_matrix_table(eqtl_mt)
mts_list.append(mt)
full_mt = multi_way_union_cols(mts_list, ["tissue"])
full_mt = full_mt.checkpoint("gs://hail-datasets-tmp/GTEx/checkpoints/GTEx_eQTL_all_snp_gene_associations_cols_unioned.mt",
overwrite=False,
_read_if_exists=True)
# Re-key rows and repartition
full_mt = hl.read_matrix_table("gs://hail-datasets-tmp/GTEx/checkpoints/GTEx_eQTL_all_snp_gene_associations_cols_unioned.mt",
_n_partitions=1000)
full_mt = full_mt.key_rows_by("locus", "alleles")
full_mt = full_mt.checkpoint("gs://hail-datasets-tmp/GTEx/GTEx_eQTL_all_snp_gene_associations.mt",
overwrite=False,
_read_if_exists=True)
full_mt.describe()
# Add metadata to globals and write final MatrixTable to hail-datasets-us
full_mt = hl.read_matrix_table("gs://hail-datasets-tmp/GTEx/GTEx_eQTL_all_snp_gene_associations.mt")
full_mt = full_mt.repartition(1000, shuffle=True)
n_rows, n_cols = full_mt.count()
n_partitions = full_mt.n_partitions()
full_mt = full_mt.rename({"_gene_id": "gene_id", "_tss_distance": "tss_distance"})
full_mt = full_mt.annotate_globals(
metadata = hl.struct(name = "GTEx_eQTL_all_snp_gene_associations",
reference_genome = "GRCh38",
n_rows = n_rows,
n_cols = n_cols,
n_partitions = n_partitions)
)
# Final eQTL MatrixTable is ~224 GiB w/ 1000 partitions
full_mt.write("gs://hail-datasets-us/GTEx_eQTL_all_snp_gene_associations_v8_GRCh38.mt")
hl.read_matrix_table("gs://hail-datasets-us/GTEx_eQTL_all_snp_gene_associations_v8_GRCh38.mt").describe()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: なお,notebook上で $!$ の後の文字列はシェル(ターミナル)に対するコマンドと解釈され,シェルの出力がnotebookの画面に出力されます.
Step2: このファイルの一行の意味は,
Step3: このように,検索結果ファイルはランキング結果を単純に文書IDで表します.たとえば,このファイルは, 検索課題$q_1$に対して $d_1, d_2, d_3$ の順で文書をランキングしたことを表しています.
Step4: シェルのパイプを用いているので,シェルについて詳しくない人は上記コマンドの意味がよく分からないかもしれませんが
Step5: ```
Step6: 3. 評価指標の計算
Step7: コマンドの説明
|
<ASSISTANT_TASK:>
Python Code:
!pyNTCIREVAL
!cat ../data/eval/q1.rel
!cat ../data/eval/method1.q1.res
!pyNTCIREVAL label -r ../data/eval/q1.rel < ../data/eval/method1.q1.res
!pyNTCIREVAL label -r ../data/eval/q1.rel < ../data/eval/method1.q1.res > ../data/eval/method1.q1.rel
!cat ../data/eval/method1.q1.rel
!pyNTCIREVAL compute -r ../data/eval/q1.rel -g 1:3 --cutoffs=1,3 < ../data/eval/method1.q1.rel
!pyNTCIREVAL compute -r ../data/eval/q1.rel -g 1:3 --cutoffs=1,3 < ../data/eval/method1.q1.rel > ../data/eval/method1.q1.eval
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sampling from a bimodal potential
Step2: Sample with HMC
Step3: Sample with NUTS
Step5: Tempered SMC with HMC kernel
Step6: Sampling from the Rastrigin potential
Step7: HMC sampler
Step8: NUTS sampler
Step9: Tempered SMC with HMC kernel
|
<ASSISTANT_TASK:>
Python Code:
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
from jax.scipy.stats import multivariate_normal
jax.config.update("jax_platform_name", "cpu")
import blackjax
import blackjax.smc.resampling as resampling
def V(x):
    """Double-well potential 5 * (|x|^2 - 1)^2: minima on the unit sphere."""
    radial = jnp.sum(jnp.square(x)) - 1
    return 5 * radial * radial
def prior_log_prob(x):
    """Log-density of a standard multivariate normal prior N(0, I_d)."""
    dim = x.shape[0]
    mean = jnp.zeros(dim)
    return multivariate_normal.logpdf(x, mean, jnp.eye(dim))
# Visualize the sequence of tempered densities pi_lambda(x) ~ prior(x) * exp(-lambda * V(x))
# for a few values of the tempering parameter lambda in [0, 1], on a 1-D grid.
linspace = jnp.linspace(-2, 2, 5000).reshape(-1, 1)
lambdas = jnp.linspace(0.0, 1.0, 5)
prior_logvals = jnp.vectorize(prior_log_prob, signature="(d)->()")(linspace)
potential_vals = jnp.vectorize(V, signature="(d)->()")(linspace)
# One row of log-densities per lambda: log prior - lambda * V.
log_res = prior_logvals.reshape(1, -1) - jnp.expand_dims(
    lambdas, 1
) * potential_vals.reshape(1, -1)
density = jnp.exp(log_res)
# Normalize each tempered density numerically via a Riemann sum on the grid.
normalizing_factor = jnp.sum(density, axis=1, keepdims=True) * (
    linspace[1] - linspace[0]
)
density /= normalizing_factor
fig, ax = plt.subplots(figsize=(12, 8))
ax.plot(linspace.squeeze(), density.T)
ax.legend(list(lambdas))
def inference_loop(rng_key, mcmc_kernel, initial_state, num_samples):
    """Draw `num_samples` successive states from an MCMC kernel via lax.scan."""
    @jax.jit
    def step(carry, key):
        next_state, _ = mcmc_kernel(key, carry)
        return next_state, next_state

    subkeys = jax.random.split(rng_key, num_samples)
    _, trajectory = jax.lax.scan(step, initial_state, subkeys)
    return trajectory
def full_logprob(x):
    # Unnormalized log-density of the target: log prior(x) - V(x).
    return -V(x) + prior_log_prob(x)
inv_mass_matrix = jnp.eye(1)
n_samples = 10_000
%%time
key = jax.random.PRNGKey(42)
hmc_parameters = dict(
step_size=1e-4, inverse_mass_matrix=inv_mass_matrix, num_integration_steps=50
)
hmc = blackjax.hmc(full_logprob, **hmc_parameters)
hmc_state = hmc.init(jnp.ones((1,)))
hmc_samples = inference_loop(key, hmc.step, hmc_state, n_samples)
samples = np.array(hmc_samples.position[:, 0])
_ = plt.hist(samples, bins=100, density=True)
_ = plt.plot(linspace.squeeze(), density[-1])
%%time
nuts_parameters = dict(step_size=1e-4, inverse_mass_matrix=inv_mass_matrix)
nuts = blackjax.nuts(full_logprob, **nuts_parameters)
nuts_state = nuts.init(jnp.ones((1,)))
nuts_samples = inference_loop(key, nuts.step, nuts_state, n_samples)
samples = np.array(nuts_samples.position[:, 0])
_ = plt.hist(samples, bins=100, density=True)
_ = plt.plot(linspace.squeeze(), density[-1])
def smc_inference_loop(rng_key, smc_kernel, initial_state):
    """Run the tempered SMC algorithm.

    We run the adaptive algorithm until the tempering parameter lambda reaches
    the value lambda=1.  (The original docstring had lost its quotes and was a
    syntax error; it is restored here.)

    Returns
    -------
    (n_iter, final_state): number of tempering steps taken and the final state.
    """
    def cond(carry):
        i, state, _k = carry
        return state.lmbda < 1

    def one_step(carry):
        i, state, k = carry
        k, subk = jax.random.split(k, 2)
        state, _ = smc_kernel(subk, state)
        return i + 1, state, k

    n_iter, final_state, _ = jax.lax.while_loop(
        cond, one_step, (0, initial_state, rng_key)
    )
    return n_iter, final_state
%%time
loglikelihood = lambda x: -V(x)
hmc_parameters = dict(
step_size=1e-4, inverse_mass_matrix=inv_mass_matrix, num_integration_steps=1
)
tempered = blackjax.adaptive_tempered_smc(
prior_log_prob,
loglikelihood,
blackjax.hmc,
hmc_parameters,
resampling.systematic,
0.5,
mcmc_iter=1,
)
initial_smc_state = jax.random.multivariate_normal(
jax.random.PRNGKey(0), jnp.zeros([1]), jnp.eye(1), (n_samples,)
)
initial_smc_state = tempered.init(initial_smc_state)
n_iter, smc_samples = smc_inference_loop(key, tempered.step, initial_smc_state)
print("Number of steps in the adaptive algorithm: ", n_iter.item())
samples = np.array(smc_samples.particles[:, 0])
_ = plt.hist(samples, bins=100, density=True)
_ = plt.plot(linspace.squeeze(), density[-1])
def prior_log_prob(x):
    """Log-density of a zero-mean Gaussian prior with covariance 2 * I_d."""
    dim = x.shape[0]
    cov = 2 * jnp.eye(dim)
    return multivariate_normal.logpdf(x, jnp.zeros(dim), cov)
def V(x):
    """Shifted Rastrigin-style potential, summed over the last axis."""
    d = x.shape[-1]
    per_coord = x**2 - 10 * jnp.cos(2 * jnp.pi * x)
    return jnp.sum(per_coord, -1) - 10 * d
linspace = jnp.linspace(-5, 5, 5000).reshape(-1, 1)
lambdas = jnp.linspace(0.0, 1.0, 5)
potential_vals = jnp.vectorize(V, signature="(d)->()")(linspace)
log_res = jnp.expand_dims(lambdas, 1) * potential_vals.reshape(1, -1)
density = jnp.exp(-log_res)
normalizing_factor = jnp.sum(density, axis=1, keepdims=True) * (
linspace[1] - linspace[0]
)
density /= normalizing_factor
fig, ax = plt.subplots(figsize=(12, 8))
ax.semilogy(linspace.squeeze(), density.T)
ax.legend(list(lambdas))
def inference_loop(rng_key, mcmc_kernel, initial_state, num_samples):
    """Draw `num_samples` successive states from an MCMC kernel with lax.scan."""
    def step(carry, key):
        new_state, _ = mcmc_kernel(key, carry)
        return new_state, new_state

    subkeys = jax.random.split(rng_key, num_samples)
    _, trajectory = jax.lax.scan(step, initial_state, subkeys)
    return trajectory
inv_mass_matrix = jnp.eye(1)
n_samples = 1_000
%%time
key = jax.random.PRNGKey(42)
loglikelihood = lambda x: -V(x)
hmc_parameters = dict(
step_size=1e-2, inverse_mass_matrix=inv_mass_matrix, num_integration_steps=50
)
hmc = blackjax.hmc(full_logprob, **hmc_parameters)
hmc_state = hmc.init(jnp.ones((1,)))
hmc_samples = inference_loop(key, hmc.step, hmc_state, n_samples)
samples = np.array(hmc_samples.position[:, 0])
_ = plt.hist(samples, bins=100, density=True)
_ = plt.plot(linspace.squeeze(), density[-1])
_ = plt.yscale("log")
%%time
nuts_parameters = dict(step_size=1e-2, inverse_mass_matrix=inv_mass_matrix)
nuts = blackjax.nuts(full_logprob, **nuts_parameters)
nuts_state = nuts.init(jnp.ones((1,)))
nuts_samples = inference_loop(key, nuts.step, nuts_state, n_samples)
samples = np.array(nuts_samples.position[:, 0])
_ = plt.hist(samples, bins=100, density=True)
_ = plt.plot(linspace.squeeze(), density[-1])
_ = plt.yscale("log")
%%time
loglikelihood = lambda x: -V(x)
hmc_parameters = dict(
step_size=1e-2, inverse_mass_matrix=inv_mass_matrix, num_integration_steps=100
)
tempered = blackjax.adaptive_tempered_smc(
prior_log_prob,
loglikelihood,
blackjax.hmc,
hmc_parameters,
resampling.systematic,
0.75,
mcmc_iter=1,
)
initial_smc_state = jax.random.multivariate_normal(
jax.random.PRNGKey(0), jnp.zeros([1]), jnp.eye(1), (n_samples,)
)
initial_smc_state = tempered.init(initial_smc_state)
n_iter, smc_samples = smc_inference_loop(key, tempered.step, initial_smc_state)
print("Number of steps in the adaptive algorithm: ", n_iter.item())
samples = np.array(smc_samples.particles[:, 0])
_ = plt.hist(samples, bins=100, density=True)
_ = plt.plot(linspace.squeeze(), density[-1])
_ = plt.yscale("log")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step4: Pair potentials
Step5: Interaction parameter
Step6: Interactive EOS plot
Step8: With Data
|
<ASSISTANT_TASK:>
Python Code:
import inspect
from math import sqrt, pi
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ipywidgets import interact
from ipywidgets.widgets import FloatSlider, IntSlider
from scipy.integrate import quad
plt.rcParams.update({'font.size': 16})
# Debye-Huckel
def potential_Debye_Huckel(r, z, D):
    # Screened Coulomb pair potential (in units of kT): lB * z^2 * exp(-r/D) / r,
    # with separation r and Debye screening length D (both in angstrom).
    # The docstring is kept as the bare LaTeX formula because plot_EOS uses
    # __doc__ as the plot legend label.  (The original docstring had lost its
    # quotes and was a syntax error; it is restored here.)
    r"""$\frac{\lambda_B z^2}{r} e^{-r/\lambda_D}$"""
    lB = 7.0 # Bjerrum length, angstroms
    return lB * z**2 * np.exp(-r/D) / r
# Lennard-Jones
def potential_Lennard_Jones(r, eps, sigma):
    # 12-6 Lennard-Jones pair potential (in units of kT) with well depth eps and
    # contact distance sigma; zero at r = sigma, minimum -eps at r = 2^(1/6)*sigma.
    # Docstring kept as the bare LaTeX formula: plot_EOS uses __doc__ as legend.
    # (The original docstring had lost its quotes and was a syntax error.)
    r"""$4\beta \varepsilon_{LJ} \left ( \left ( \frac{\sigma}{r}\right )^{12} - \left ( \frac{\sigma}{r}\right )^{6}\right )$"""
    return 4 * eps * ( (sigma/r)**12 - (sigma/r)**6 )
# Total potential
def potential_Combined(r, z, D, eps, sigma):
    # Sum of the screened-Coulomb (Debye-Huckel) and Lennard-Jones pair potentials.
    # Docstring kept as the bare LaTeX formula: plot_EOS uses __doc__ as legend.
    # (The original docstring had lost its quotes and was a syntax error.)
    r"""$\frac{\lambda_B z^2}{r} e^{-r/\lambda_D} + 4\beta \varepsilon_{LJ} \left ( \left ( \frac{\sigma}{r}\right )^{12} - \left ( \frac{\sigma}{r}\right )^{6}\right )$"""
    return potential_Debye_Huckel(r, z, D) + potential_Lennard_Jones(r, eps, sigma)
def ahat(potential, **parameters):
    """Generalized-van-der-Waals attraction parameter for a pair potential.

    Integrates r^2 * potential(r, ...) from contact (``sigma``) to infinity and
    multiplies by -2*pi.  Keyword arguments not accepted by ``potential`` are
    silently dropped, so the same call works for any of the potentials above.
    """
    sigma = parameters['sigma']
    # extract the relevant parameters for the potential
    parameters = {k:v for k,v in parameters.items() if k in inspect.signature(potential).parameters}
    def integrand(r):
        return potential(r, **parameters) * r**2
    # np.inf (np.infty was removed in NumPy 2.0).
    integral, error = quad(integrand, sigma, np.inf, limit=50)
    return -2 * pi * integral
def ahatexact(z, D, eps, sigma):
    """Closed-form `ahat` for the combined DH + LJ potential (Bjerrum length 7)."""
    lj_term = -8/9 * eps * sigma**3
    dh_term = 7 * np.exp(-sigma/D) * z**2 * (D + sigma)
    return -2 * pi * (lj_term + dh_term)
def mu_ideal(n):
    # Ideal-gas chemical potential beta*mu at number density n (up to a constant).
    return np.log(n)
def mu_gvdw_backup(n, z, D, eps, sigma, potential=potential_Combined):
    """Older generalized-van-der-Waals chemical potential (kept for reference)."""
    excluded = pi*sigma**2 / 2          # 2-D excluded area per particle
    area_per_particle = 1 / n
    attraction = ahat(potential, z=z, D=D, eps=eps, sigma=sigma)
    crowding = area_per_particle / (area_per_particle - excluded)
    return np.log(n) + np.log(crowding) + crowding - 2 * attraction/area_per_particle
def mu_gvdw(n, z, D, eps, sigma, potential=potential_Combined):
    """Generalized-van-der-Waals chemical potential beta*mu at number density n."""
    excluded = pi*sigma**2 / 2 # excluded volume
    y = 1 / n
    attraction = ahat(potential, z=z, D=D, eps=eps, sigma=sigma)
    gap = y - excluded
    return - np.log(gap) - excluded / gap - 2 * attraction / y
n = np.linspace(1e-4, 1e-3, 100)
_potentials = {fname[10:]: func for fname, func in globals().items() if fname.startswith("potential_")}
sliders = {
"eps": FloatSlider(min=0, max=10, step=0.1, value=1, description=r'LJ , $\varepsilon_{LJ}$ ($\beta$)'),
"sigma": FloatSlider(min=0, max=10, step=0.1, value=4, description=r'Size, $\sigma_{LJ}$ (Å)'),
"z": IntSlider(min=0, max=3, step=1, value=1, description=r'Charge, $z$ ($e$)'),
"Cs": FloatSlider(min=1e-3, max=1, step=0.1, value=0.3, description="Salt, $c_s$ (M)"),
"potential": _potentials
}
@interact(**sliders)
def plot_EOS( eps=1.0, sigma=4.0, z=1.0, Cs=0.3, potential=potential_Combined):
    """Interactively plot the ideal vs. generalized-vdW equations of state.

    eps, sigma, z, Cs and the pair potential are driven by the ipywidgets
    sliders defined above.
    """
    # Debye screening length from the salt concentration Cs (assumes the usual
    # aqueous-1:1-salt rule D = 3.04/sqrt(Cs) angstrom -- TODO confirm units).
    D = 3.04/sqrt(Cs)
    plt.figure(figsize=(10, 10/1.618), )
    # Both curves are offset by +10 so they sit inside the fixed y-range below.
    plt.plot(n, 10 + mu_ideal(n), 'k--', label='ideal', lw=2)
    plt.plot(n, 10 + mu_gvdw(n, z, D, eps, sigma, potential=potential), 'r-', label=potential.__doc__ or potential.__name__, lw=2)
    plt.title('Equation of State')
    plt.xlabel(r'Number density, $n$')
    plt.ylabel(r'Potential, $\beta \mu$')
    plt.ylim([-2,10])
    plt.legend(loc=4, frameon=False)
    plt.show()
with open("datafile.csv", "wt") as stream:
stream.write(\
length potential proteins density
1.414000000000000000e+03 1.429585460731450097e-01 2.000000000000000000e+01 1.000302091231552026e+03
9.990000000000000000e+02 2.990882091428900269e-01 2.000000000000000000e+01 2.004006008010011783e+03
8.160000000000000000e+02 4.684432472751309806e-01 2.000000000000000000e+01 3.003652441368703876e+03
6.320000000000000000e+02 8.629727385734929923e-01 2.000000000000000000e+01 5.007210382951449901e+03
5.340000000000000000e+02 1.353602607621670062e+00 2.000000000000000000e+01 7.013704779138435697e+03
4.710000000000000000e+02 1.970895704549270100e+00 2.000000000000000000e+01 9.015466031977857710e+03
4.260000000000000000e+02 2.788653065634310035e+00 2.000000000000000000e+01 1.102074103462716812e+04
3.920000000000000000e+02 3.842403663548089821e+00 2.000000000000000000e+01 1.301541024573094546e+04)
df = pd.read_csv("datafile.csv", delimiter="(?:\s+|,)", engine="python")
def plot_EOS( eps=1.0, sigma=4.0, z=0.0, Cs=0.3, potential=potential_Combined):
D = 3.04/sqrt(Cs)
# plt.title(potential.__doc__)
plt.plot(n, 10+mu_ideal(n), 'k--', label='ideal', lw=2)
plt.plot(n, 10+mu_gvdw(n, z, D, eps, sigma, potential=potential), 'r-', label=potential.__doc__ or potential.__name__, lw=2)
plt.plot(df.density/10**8, df.potential + mu_ideal((df.density/10**8)) + 10, label="data")
plt.title('Equation of State')
plt.xlabel(r'Number density, $n$')
plt.ylabel(r'Potential, $\beta \mu$')
plt.legend(loc=0, frameon=False)
plt.show()
data_density = df.density/10**8
n = np.linspace(data_density[0], list(data_density)[-1], 100)
_potentials = {fname: func for fname, func in globals().items() if fname.startswith("potential_")}
i = interact(plot_EOS,
eps=(0.0, 10.0, 0.1),
sigma=(0, 10, 0.1),
z=(0.0, 3, 1.0),
Cs=(1e-3, 1.0, 0.1),
potential = _potentials )
i.widget.children[0].description=r'$\beta\varepsilon_{LJ}$'
i.widget.children[1].description=r'$\sigma_{LJ}$'
i.widget.children[2].description=r'$z$'
i.widget.children[3].description=r'$c_s$ (M)'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 1.3
Step2: Problem 1.4
Step3: The 95% confidence interval for $\frac{\alpha}{c_p}$ is 0.0289 $\pm$ 0.001 inverse hours
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Load the November temperature log; keep only rows flagged as valid for a
# centered finite difference.
data = pd.read_csv('november.csv')
ind = data['center_diff_valid']
# Quick look at the indoor temperature trace (time converted seconds -> days).
plt.plot(data['time'][ind] / (60 * 60 * 24), data['temp'][ind], '.')
plt.ylabel('Temperature [F]')
plt.xlabel('Days')
plt.show()
# Newton's-law-of-cooling estimate: alpha/c_p = -dTh/dt / (Th - Tc), with
# dTh/dt taken as the average of forward and backward slopes (hence the 0.5)
# and the 60*60 factor converting per-second to per-hour.
Th = data['temp'][ind].values
Tc = data['outdoor_temp'][ind].values
time = data['time'][ind].values
diff = (Th[1:] - Th[:-1]) / (time[1:] - time[:-1])
acp = -60 * 60 * 0.5 / (Th[1:-1] - Tc[1:-1]) * (diff[1:] + diff[:-1])
plt.plot(time[1:-1] / (60 * 60 * 24), acp, '.')
plt.xlabel('days')
plt.ylabel(r'$\frac{\alpha}{c_p}$ [s$^{-1}$]')
plt.show()
import scipy.stats as ss
# 95% confidence interval for the mean of alpha/c_p (normal approximation).
xbar = np.nanmean(acp)
se = np.nanstd(acp) / np.sqrt(len(acp))
y = ss.norm.ppf(0.975)
print('{} +/- {}'.format(xbar, y * se))
plt.hist(acp[np.isnan(acp) == False])
plt.xlabel(r'$\frac{\alpha}{c_p}$ [s$^{-1}$]')
plt.ylabel('Counts')
plt.show()
# Repeat the same estimate for December and compare the two months.
acp_pre = acp
data = pd.read_csv('december.csv')
ind = data['center_diff_valid']
Th = data['temp'][ind].values
Tc = data['outdoor_temp'][ind].values
time = data['time'][ind].values
diff = (Th[1:] - Th[:-1]) / (time[1:] - time[:-1])
acp_post = -60 * 60 * 0.5 / (Th[1:-1] - Tc[1:-1]) * (diff[1:] + diff[:-1])
# Means, a Wilcoxon rank-sum test for a shift, and the relative change.
print(np.nanmean(acp_post), np.nanmean(acp_pre))
print(ss.ranksums(acp_pre, acp_post))
print((np.nanmean(acp_post) - np.nanmean(acp_pre)) / np.nanmean(acp_pre))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
# ES-DOC output object for the CMIP6 'landice' realm of the 'csiro-bom'
# institute's 'sandbox-2' model; all DOC.set_* calls below write into it.
DOC = NotebookOutput('cmip6', 'csiro-bom', 'sandbox-2', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: La position des sommets et des arêtes n'a pas d'importance d'un point de vue mathématiques.
Step2: Matrices
Step3: Pour les curieux voir l'explication
Step4: En python, les combinaisons sont générés grace à la fonction combinations disponible dans le module itertools
Step5: Maintenant pour reproduire le tableau, nous allons
Step6: On reproduit le tableau par colonne de deux en affichant le tableau
Step7: ! Nous n'avons ici la même représentation de l'information.
Step8: => Quel est l'ordre de ce graphe ?
Step9: => Pour chaque sommet
Step10: ==> Quels sont le degré maximal et le degré minimal de ce graphe ?
Step11: => Créer une fonction qui définit s'il s'agit d'un graphe régulier
Step12: => Quelle est la somme des degrés de ce graphe?
Step13: Chaîne, cycle et parcours de graphe
Step14: Un sous-graphe est une partie d'un graphe composé de quelques uns de ses sommets
|
<ASSISTANT_TASK:>
Python Code:
#importer les libraires
#pour afficher
import matplotlib.pyplot as plt
#pour le calcul
import numpy as np
#pour le réseau
import networkx as nx
%matplotlib inline
# instantiate the graph
g = nx.Graph()
# add a single node:
#g.add_node("paul")
# add a list of nodes
g.add_nodes_from(["paul", "matthieu", "jean", "luc", "marc"])
# add a single edge:
#g.add_edge("paul","matthieu")
# add a list of edges
g.add_edges_from([("paul", "matthieu"), ("matthieu", "jean"),("jean", "marc"), ("marc", "luc"), ("luc", "matthieu")])
# How many nodes?
print "Ce graphe compte", g.number_of_nodes(), "noeuds:"
print(g.nodes())
# How many edges?
print "Ce graphe compte", g.number_of_edges(), "liens"
print(g.edges())
# Display this graph
# (a plain drawing with default layout)
nx.draw(g)
plt.savefig("graphe_apotres.png") # save as png
plt.show() # display
#Ajoutons un noeud
g.add_node("pierre", name="pierre")
#et ses relations
g.add_edges_from([("pierre", "matthieu"), ("paul", "pierre")])
#On peut enlever des noeuds ou détruire des liens de la même ma,ière que lorsqu'on les ajoute
#g.remove_node("jean")
#print(g.remove_nodes_from([])
#si le positionnement est aléatoire en mathématique en python
#il faut définir l'agencement du graphe
#par défault networkx propose spring_layout
# nous verrons par la suite les agencements (layout) proposés
pos=nx.spring_layout(g)
#ici nous ajoutons une couleur et un label aux noeuds
nx.draw(g,pos, node_color="g",with_labels=True)
plt.savefig("graphe_example.png")
%%latex
$C_n^k = \frac{n!}{k!\,(n-k)!}$
def combo_score(n,k):
    """Return the binomial coefficient C(n, k) = n! / (k! * (n-k)!).

    Number of ways to choose k elements among n, e.g. the number of possible
    pairs (edges) among 482 elements is C(482, 2).
    https://fr.wikipedia.org/wiki/Combinaison_(math%C3%A9matiques)
    """
    from math import factorial as fact
    # Integer floor division is exact here (binomial coefficients are integers)
    # and avoids float rounding/overflow for large n, matching the intended
    # formula noted in the original source (fact(n)//(fact(k)*fact(n-k))).
    return fact(n) // (fact(k) * fact(n - k))
from itertools import combinations
matrix = [n for n in combinations(g.nodes(),2)]
print len(matrix), "liens possibles"
for n in matrix:
print "-", n[0],"&",n[1]
matrix_0 = []
for n in matrix:
if n in g.edges():
matrix_0.append(n + (1,))
else:
matrix_0.append(n + (0,))
print matrix_0
print "---------------------------------------"
print "\t|\t".join(["Noeud", "Noeud", "Relié?"])
for n in matrix_0:
print "---------------------------------------"
print "\t|\t".join([str(i) for i in n])
# List the vertices and the order (number of vertices) of the graph.
sommets = g.nodes()
print sommets
print g.number_of_nodes()
# For each vertex, show its adjacent vertices (neighbours) and its degree
# (number of incident edges).
for n in g.nodes():
    print "---"
    print "\t-", n, "a pour noeuds adjacents:",g.neighbors(n)
    print "\t-", n,"est un sommet de degré", len(g.neighbors(n))
nb_sommets = g.number_of_nodes()
print "Ce graphe est d'ordre", nb_sommets
#calculer le nombre de liens pour chaque noeud
degrees = [len(g.neighbors(n)) for n in g.nodes())]
max_degree = max(degrees)
min_degree = min(degrees)
print "- son degré maximal est de :", max_degree
print "- son degré minimal est de :", min_degree
def is_regular_graph(g):
    """Return True when every vertex of g has the same degree.

    An empty graph is considered regular (vacuously true) instead of raising
    ValueError from max()/min() on an empty sequence.
    """
    degrees = [len(g.neighbors(n)) for n in g.nodes()]
    if not degrees:
        return True
    return max(degrees) == min(degrees)
print "Ce graph est il regulier?", is_regular_graph(g)
sum_degree = len(g.edges()) *2
print "Ce graphe a donc %i arêtes" %sum_degree
#Les parcours possibles sans repasser par le même noeud depuis "paul" jusqu'à "luc"
print "parcours de Paul à Luc:"
for path in nx.all_simple_paths(g, "paul", "luc"):
print "==>".join(path)
#Tous les cycles possibles sans repasser par le même noeud depuis "paul" jusqu'à "paul"
#la limite de recherche correspond au degré maximal du graph
print "\nparcours de Paul à Paul:"
for path in nx.all_simple_paths(g, "paul", "paul", 6):
print "==>".join(path)
# ### Types of graphs
# * A graph is **complete** when any two distinct vertices are always adjacent:
#   every vertex is linked to every other one, i.e. all possible edges between
#   the nodes exist.  We can build one programmatically with what we have seen.
#   (These explanatory lines were leaked markdown without comment markers in
#   the source, which made the cell a syntax error.)
graphe_complet = nx.Graph()
graphe_complet.add_nodes_from([1, 2, 3, 4])
# every pair of nodes becomes an edge
edges = combinations(graphe_complet.nodes(), 2)
# ... and add its relations
graphe_complet.add_edges_from(edges)
nx.draw(graphe_complet, with_labels = True)
plt.savefig("graphe_complet1.png") # save as png
plt.show() # display
# * A graph is **connected** when, for any two vertices i and j, there always
#   exists a chain (path) linking i and j: no vertex is isolated.
#   (These lines were leaked markdown without comment markers in the source,
#   which made the cell a syntax error.)
graphe_connexe = nx.Graph()
# add its nodes
graphe_connexe.add_nodes_from([1, 2, 3, 4, 5])
# ... and its edges
edges = [(1,2), (1,3), (1,4), (1,5), (2,3), (3,5), (3,4), (4,5)]
graphe_connexe.add_edges_from(edges)
nx.draw(graphe_connexe, with_labels = True)
plt.savefig("graphe_connexe1.png") # save as png
plt.show() # display
g3 = nx.Graph()
# NOTE(review): the next line re-adds nodes already in graphe_connexe; it was
# probably meant to populate g3 -- confirm against the original notebook.
graphe_connexe.add_nodes_from([1, 2, 3, 4, 5])
# * A graph (or subgraph) is **stable** (independent) when no edge links its vertices.
import networkx as nx
G=nx.cubical_graph()
pos=nx.spring_layout(G) # positions for all nodes
# nodes
nx.draw_networkx_nodes(G,pos,
nodelist=[0,1,2,3],
node_color='r',
node_size=500,
alpha=0.8)
nx.draw_networkx_nodes(G,pos,
nodelist=[4,5,6,7],
node_color='b',
node_size=500,
alpha=0.8)
# edges
nx.draw_networkx_edges(G,pos,width=1.0,alpha=0.5)
nx.draw_networkx_edges(G,pos,
edgelist=[(0,1),(1,2),(2,3),(3,0)],
width=8,alpha=0.5,edge_color='r')
nx.draw_networkx_edges(G,pos,
edgelist=[(4,5),(5,6),(6,7),(7,4)],
width=8,alpha=0.5,edge_color='b')
# some math labels
labels={}
labels[0]=r'$a$'
labels[1]=r'$b$'
labels[2]=r'$c$'
labels[3]=r'$d$'
labels[4]=r'$\alpha$'
labels[5]=r'$\beta$'
labels[6]=r'$\gamma$'
labels[7]=r'$\delta$'
nx.draw_networkx_labels(G,pos,labels,font_size=16)
plt.axis('off')
plt.savefig("labels_and_colors.png") # save as png
plt.show() # display
## Du graphe à la matrice
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hello World
Step2: Try to get the following code to print the same message as the one above.
Step3: Strings
Step4: We now want to manipulate these strings. A few exercises are below. Take as much time as you need; you are not expected to be able to complete them all.
Step5: Numbers
Step6: Let's see this in action below.
Step7: This type of distinction matters because roundoff errors can be an issue. These are a humongous pain but can be dealt with once you know what to watch out for.
Step8: Note that everything in Python is at some level an object. Our variables are no exception.
Step9: Math
Step10: Lists
Step11: Now let's print out an element of the list and capitalize it. We can figure out how to do this within the notebook by taking advantage of tab completion.
Step12: See if you can figure out how to find where 'australian cattle dog' is located using just standard Python operators and built-in list functions.
Step13: As before, see if you can figure out how to append items to the end of a list or insert them in the middle of a list. Append 'poodle' to the end of the list, then insert 'chihuahua' after 'border collie' without entering the position by hand.
Step14: Now change border collie to 'maltese' and remove 'labrador retriever' from the list.
Step15: Finally, a hugely useful concept in Python is slicing objects, where we can return only relevant portions of an object without modifying the relevant object itself.
Step16: Tuples
Step17: Why might we want to use tuples instead of lists?
Step18: Note that dictionaries do not maintain any sense of order for the input parameters. This is by design, since this lack of ordering allows dictionaries to be implemented very efficiently.
Step19: Conditionals
Step20: Loops
Step21: This flexibility makes loops in Python incredibly powerful. As an example, see if you can get the loop below to run using some combination of (1) range, (2) zip, and (3) enumerate.
Step22: Alternately, we can use while loops to get things done.
Step23: Variable (Re-)Assignment
Step24: Good Coding Practices
Step25: In addition, there's also the PEP8 style guide.
|
<ASSISTANT_TASK:>
Python Code:
# Ensures compatibility between Python 2 and 3.
from __future__ import print_function, division
from __builtin__ import range
print('Hello Python world!')
...
print(message)
# Example of two strings.
my_string = "This is a double-quoted string."
your_string = 'This is a single-quoted string.'
# Example of types.
quote = "Josh once asked, 'Is this distinction useful?'"
followup = 'What do you think?'
# Combine the two lines (`quote`, `followup`) into one string with appropriate spacing.
combined_quote = ...
print(quote, followup) # printing two strings
print(combined_quote) # print the combined string
# Check whether the two strings are identical.
s1, s2 = "Test.", 'Test.'
same = ...
print(same)
# Split `my_string` into a separate string for each word.
collection_of_strings = ...
print(collection_of_strings)
# Check whether the *letter* `a` is in `quote`.
a_lettercheck = ...
print(a_lettercheck)
# Check whether the *word* `a` is in `quote`.
a_wordcheck = ...
print(a_wordcheck)
# Define two variables.
a, b, c = 3, 2, 5.
print('a = {0}'.format(a))
print('b =', b)
print('c', '=', c)
# Addition.
x = a + b
print(x, type(x))
# Subtraction.
x = c - a
print(x, type(x))
# Multiplication.
x = b * a
print(x, type(x))
# Division.
x = a / b
print(x, type(x))
# Exponentiation.
x = c ** b
print(x, type(x))
# ???
x = b // a
print(x, type(x))
x = c // a
print(x, type(x))
f1, f2 = 0.3, 0.1 + 0.2
f1, f2, f1 == f2 # leaving variables for auto-output
f1.as_integer_ratio(), f2.as_integer_ratio(), (0.5).as_integer_ratio()
# Compute magnitude given flux.
F = ...
m = ...
print(m)
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
# Access element.
dog = dogs[0]
# Print result.
print(dog.)
# Searching list.
dogs.index(...)
# Append.
# Insert.
print(dogs)
# Remove element.
dogs.remove(...)
dogs[0] = '...'
print(dogs)
# Slice an array
print(dogs[1:3])
print(dogs)
date = (6, 11, 2018)
# Check tomorrow's date.
# Initialize dictionary.
numbers = {'one': 1, 'two': 2, 'three': 3}
# Access a value using its key.
print(numbers['two'])
# Add a new element.
numbers['ninety'] = 90
print(numbers)
primes = {2, 3, 5, 7}
odds = {1, 3, 5, 7, 9}
# Union: items appearing in either
print(primes.union(odds)) # equivalently with a method
# Intersction
{1, 2, 3, 5, 7, 9}
print(primes.intersection(odds)) # equivalently with a method
# difference: Items in primes but not in odds
print(primes.difference(odds)) # equivalently with a method
x = -15
if x == 0:
print(x, "is zero")
elif x > 0:
print(x, "is positive")
elif x < 0:
print(x, "is negative")
else:
print(x, "is unlike anything I've ever seen...")
# Traditional loop.
for i in range(3):
print('I like ' + dogs[i] + 's.')
# "Pythonic" loop.
for dog in dogs:
print('I like ' + dog + 's.')
for ... in ...:
print(counter, ':', dog)
i = 0
while i < len(dogs):
print(i, ':', dogs[i])
i += 1
# Example 1.
josh = quote # reassign
josh += " He got #rekt." # ???
print(quote)
print(josh)
# Example 2.
quotes = quote.split() # reassign
josh = quotes
josh += " He got #rekt.".split() # ???
print(quotes)
print(josh)
# Example 3.
quotes = quote.split() # reassign
josh = quotes[:]
josh += " He got #rekt.".split() # ???
print(quotes)
print(josh)
# Example 4.
d = b
d += 3.
print(b)
print(d)
import this
title = "Banneker + Aztlan Initiative"
month = 6
day = 11
year = 2018
width = 30
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Real-Time Loop Through
Step2: Real-Time Filtering
Step3: Create some global variables for the filter coefficients and the filter state array (recall that a filter has memory).
Step4: Real-Time Playback
Step5: Real-Time Audio Capture/Record
Step6: Capture Buffer Analysis
Step7: Note for the attributes used in the above examples the frame_length is always 1024 samples and the sampling rate $f_s = 44.1$ ksps. The ideal callback period is thus
Step8: Next consider what the captured tic and toc data reveals about the processing. Calling the method cb_active_plot() produces a plot similar to what an electrical engineer would see when using a logic analyzer to show the time spent in an interrupt service routine of an embedded system. The latency is also evident. You expect to see a minimum latency of two frame lengths (input buffer fill and output buffer fill), e.g.,
Step9: Finally, the spectrum of the output signal. To apply custon scaling we use a variation of psd() found in the sigsys module. If we are plotting the spectrum of white noise sent through a filter, the output PSD will be of the form $\sigma_w^2|H(e^{j2\pi f/f_s})|^2$, where $\sigma_w^2$ is the variance of the noise driving the filter. You may choose to overlay a plot of
|
<ASSISTANT_TASK:>
Python Code:
Image('PyAudio_RT_flow@300dpi.png',width='90%')
pah.available_devices()
# define a pass through, y = x, callback
def callback(in_data, frame_count, time_info, status):
    """PyAudio stream callback that passes the input straight to the output.

    Parameters follow the PyAudio callback signature: `in_data` is a byte
    string of int16 samples, `frame_count` the number of frames in it.
    Returns the (bytes, flag) pair PyAudio expects.
    """
    DSP_IO.DSP_callback_tic()
    # convert byte data to ndarray
    # (np.frombuffer replaces the deprecated np.fromstring; the read-only
    # view is fine because astype() below makes a writable copy)
    in_data_nda = np.frombuffer(in_data, dtype=np.int16)
    #***********************************************
    # DSP operations here
    # This demo is a pure pass-through: y = x
    x = in_data_nda.astype(float32)
    y = x
    # Typically more DSP code here
    #***********************************************
    # Save data for later analysis
    # accumulate a new frame of samples
    DSP_IO.DSP_capture_add_samples(y)
    #***********************************************
    # Convert from float back to int16
    y = y.astype(int16)
    DSP_IO.DSP_callback_toc()
    # Convert ndarray back to bytes for PyAudio
    return y.tobytes(), pah.pyaudio.paContinue
DSP_IO = pah.DSP_io_stream(callback,2,1,Tcapture=0)
DSP_IO.stream(5)
import sk_dsp_comm.fir_design_helper as fir_d
b = fir_d.fir_remez_bpf(2500,3000,4500,5000,.5,60,44100,18)
fir_d.freqz_resp_list([b],[1],'dB',44100)
ylim([-80,5])
grid();
# Design an IIR Notch
b, a = ss.fir_iir_notch(3000,44100,r= 0.9)
fir_d.freqz_resp_list([b],[a],'dB',44100,4096)
ylim([-60,5])
grid();
# For the FIR filter 'b' is defined above, but 'a' also needs to be declared
# For the IIR notch filter both 'b' and 'a' are declared above
a = [1]
zi = signal.lfiltic(b,a,[0])
#zi = signal.sosfilt_zi(sos)
# define callback (#2)
def callback2(in_data, frame_count, time_info, status):
    """PyAudio callback that filters each int16 input frame with the
    coefficients (b, a), carrying the filter state `zi` across frames.

    Returns the filtered frame as int16 bytes plus the continue flag.
    """
    global b, a, zi
    DSP_IO.DSP_callback_tic()
    # convert byte data to ndarray
    # (np.frombuffer replaces the deprecated np.fromstring)
    in_data_nda = np.frombuffer(in_data, dtype=np.int16)
    #***********************************************
    # DSP operations here
    # Here we apply a linear filter to the input
    x = in_data_nda.astype(float32)
    #y = x
    # The filter state/(memory), zi, must be maintained from frame-to-frame
    y, zi = signal.lfilter(b,a,x,zi=zi) # for FIR or simple IIR
    #y, zi = signal.sosfilt(sos,x,zi=zi) # for IIR use second-order sections
    #***********************************************
    # Save data for later analysis
    # accumulate a new frame of samples
    DSP_IO.DSP_capture_add_samples(y)
    #***********************************************
    # Convert from float back to int16
    y = y.astype(int16)
    DSP_IO.DSP_callback_toc()
    return y.tobytes(), pah.pyaudio.paContinue
DSP_IO = pah.DSP_io_stream(callback2,2,1,Tcapture=0)
DSP_IO.stream(5)
# define callback (2)
# Here we configure the callback to play back a wav file
def callback3(in_data, frame_count, time_info, status):
    """Playback callback: ignore the input buffer and stream frames of the
    globally loaded wav file (a loop_audio object) to the audio output.
    """
    DSP_IO.DSP_callback_tic()
    global x
    # loop_audio hands back floats scaled to [-1, 1]; bring them up to the
    # int16 range before returning them to PyAudio
    frame = x.get_samples(frame_count) * 32767
    # (insert real-time DSP on `frame` here if desired)
    # keep a copy of what was played for offline analysis
    DSP_IO.DSP_capture_add_samples(frame)
    # hand the frame back to PyAudio as int16 bytes
    out = frame.astype(int16)
    DSP_IO.DSP_callback_toc()
    return out.tobytes(), pah.pyaudio.paContinue
#fs, x_wav = ss.from_wav('OSR_us_000_0018_8k.wav')
fs, x_wav2 = ss.from_wav('Music_Test.wav')
x_wav = (x_wav2[:,0] + x_wav2[:,1])/2
#x_wav = x_wav[15000:90000]
x = pah.loop_audio(x_wav)
#DSP_IO = pah.DSP_io_stream(callback3,2,1,fs=8000,Tcapture=2)
DSP_IO = pah.DSP_io_stream(callback3,2,1,fs=44100,Tcapture=2)
DSP_IO.stream(20)
# define callback (2)
# Here we configure the callback to capture a one channel input
def callback4(in_data, frame_count, time_info, status):
    """Capture-only callback: record each int16 input frame for later
    analysis and return silence (an all-zero frame) to the output.
    """
    DSP_IO.DSP_callback_tic()
    # convert byte data to ndarray
    # (np.frombuffer replaces the deprecated np.fromstring)
    in_data_nda = np.frombuffer(in_data, dtype=np.int16)
    #***********************************************
    # DSP operations here
    # No processing: this callback only captures the input
    x = in_data_nda.astype(float32)
    y = x
    #***********************************************
    # Save data for later analysis
    # accumulate a new frame of samples
    DSP_IO.DSP_capture_add_samples(y)
    #***********************************************
    # Mute the output: convert to int16 and zero out the frame
    y = 0*y.astype(int16)
    DSP_IO.DSP_callback_toc()
    # Convert ndarray back to bytes for PyAudio
    return y.tobytes(), pah.pyaudio.paContinue
DSP_IO = pah.DSP_io_stream(callback4,0,1,fs=22050)
DSP_IO.stream(5)
DSP_IO.stream_stats()
T_cb = 1024/44100 * 1000 # times 1000 to get units of ms
print('Callback/Frame period = %1.4f (ms)' % T_cb)
subplot(211)
DSP_IO.cb_active_plot(0,270) # enter start time (ms) and stop time (ms)
subplot(212)
DSP_IO.cb_active_plot(150,160)
tight_layout()
Npts = 1000
Nstart = 1000
plot(arange(len(DSP_IO.data_capture[Nstart:Nstart+Npts]))*1000/44100,
DSP_IO.data_capture[Nstart:Nstart+Npts])
title(r'A Portion of the capture buffer')
ylabel(r'Amplitude (int16)')
xlabel(r'Time (ms)')
grid();
Pxx, F = ss.my_psd(DSP_IO.data_capture,2**13,44100);
fir_d.freqz_resp_list([b],[a],'dB',44100)
plot(F,10*log10(Pxx/max(Pxx))+3,'g') # Normalize by the max PSD
ylim([-80,5])
xlim([100,20e3])
grid();
specgram(DSP_IO.data_capture,1024,44100);
ylim([0, 5000])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Check out the data
Step3: Step 2
Step4: STEP 3
Step5: Perform Manual CHECKS on the irf_utils
Step 4
Step6: Plot Ranked Feature Importances
Step7: Decision Tree 0 (First) - Get output
Step8: Compare to our dict of extracted data from the tree
Step9: Check output against the diagram
Step11: Wrapper function for iRF
Step12: Run the iRF function
Step13: all_rit_bootstrap_output
Step14: Compare to the original single fitted random forest (top of the notebook)!
Step15: These look like they match as required!
Step16:
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
import numpy as np
from functools import reduce
# Needed for the scikit-learn wrapper function
from sklearn.utils import resample
from sklearn.ensemble import RandomForestClassifier
from math import ceil
# Import our custom utilities
from imp import reload
from utils import irf_jupyter_utils
from utils import irf_utils
reload(irf_jupyter_utils)
reload(irf_utils)
load_breast_cancer = load_breast_cancer()
X_train, X_test, y_train, y_test, rf = irf_jupyter_utils.generate_rf_example(n_estimators=1000,
feature_weight=None)
print("Training feature dimensions", X_train.shape, sep = ":\n")
print("\n")
print("Training outcome dimensions", y_train.shape, sep = ":\n")
print("\n")
print("Test feature dimensions", X_test.shape, sep = ":\n")
print("\n")
print("Test outcome dimensions", y_test.shape, sep = ":\n")
print("\n")
print("first 5 rows of the training set features", X_train[:2], sep = ":\n")
print("\n")
print("first 5 rows of the training set outcomes", y_train[:2], sep = ":\n")
all_rf_tree_data = irf_utils.get_rf_tree_data(rf=rf,
X_train=X_train, y_train=y_train,
X_test=X_test, y_test=y_test)
all_rit_tree_data = irf_utils.get_rit_tree_data(
all_rf_tree_data=all_rf_tree_data,
bin_class_type=1,
random_state=12,
M=100,
max_depth=2,
noisy_split=False,
num_splits=2)
#for i in range(100):
# print(all_rit_tree_data['rit{}'.format(i)]['rit_leaf_node_union_value'])
# Print the feature ranking
print("Feature ranking:")
feature_importances_rank_idx = all_rf_tree_data['feature_importances_rank_idx']
feature_importances = all_rf_tree_data['feature_importances']
for f in range(X_train.shape[1]):
print("%d. feature %d (%f)" % (f + 1
, feature_importances_rank_idx[f]
, feature_importances[feature_importances_rank_idx[f]]))
# Plot the feature importances of the forest
feature_importances_std = all_rf_tree_data['feature_importances_std']
plt.figure()
plt.title("Feature importances")
plt.bar(range(X_train.shape[1])
, feature_importances[feature_importances_rank_idx]
, color="r"
, yerr = feature_importances_std[feature_importances_rank_idx], align="center")
plt.xticks(range(X_train.shape[1]), feature_importances_rank_idx)
plt.xlim([-1, X_train.shape[1]])
plt.show()
# Now plot the trees individually
#irf_jupyter_utils.draw_tree(decision_tree = all_rf_tree_data['rf_obj'].estimators_[0])
#irf_jupyter_utils.pretty_print_dict(inp_dict = all_rf_tree_data['dtree0'])
# Count the number of samples passing through the leaf nodes
sum(all_rf_tree_data['dtree0']['tot_leaf_node_values'])
#irf_jupyter_utils.pretty_print_dict(inp_dict = all_rf_tree_data['dtree0']['all_leaf_paths_features'])
def run_rit(X_train,
            X_test,
            y_train,
            y_test,
            K=7,
            n_estimators=20,
            B=10,
            random_state_classifier=2018,
            propn_n_samples=0.2,
            bin_class_type=1,
            random_state=12,
            M=4,
            max_depth=2,
            noisy_split=False,
            num_splits=2):
    # NOTE(review): the two bare lines below look like a docstring whose
    # triple quotes were stripped during notebook conversion -- as written
    # they are a SyntaxError; restore the surrounding triple quotes.
    This function will allow us to run the RIT
    for the given parameters
    # Set the random state for reproducibility
    np.random.seed(random_state_classifier)
    # Convert the bootstrap resampling proportion to the number
    # of rows to resample from the training data
    n_samples = ceil(propn_n_samples * X_train.shape[0])
    # Initialize dictionary of rf weights
    # CHECK: change this name to be `all_rf_weights_output`
    all_rf_weights = {}
    # Initialize dictionary of bootstrap rf output
    all_rf_bootstrap_output = {}
    # Initialize dictionary of bootstrap RIT output
    all_rit_bootstrap_output = {}
    # Iteratively re-fit the forest, feeding each fit's feature importances
    # back in as feature weights for the next fit (the "iterative" in iRF).
    for k in range(K):
        if k == 0:
            # Initially feature weights are None
            feature_importances = None
            # Update the dictionary of all our RF weights
            all_rf_weights["rf_weight{}".format(k)] = feature_importances
            # fit RF feature weights i.e. initially None
            rf = RandomForestClassifier(n_estimators=n_estimators)
            # fit the classifier
            rf.fit(
                X=X_train,
                y=y_train,
                feature_weight=all_rf_weights["rf_weight{}".format(k)])
            # Update feature weights using the
            # new feature importance score
            feature_importances = rf.feature_importances_
            # Load the weights for the next iteration
            all_rf_weights["rf_weight{}".format(k + 1)] = feature_importances
        else:
            # fit weighted RF
            # Use the weights from the previous iteration
            rf = RandomForestClassifier(n_estimators=n_estimators)
            # fit the classifier
            rf.fit(
                X=X_train,
                y=y_train,
                feature_weight=all_rf_weights["rf_weight{}".format(k)])
            # Update feature weights using the
            # new feature importance score
            feature_importances = rf.feature_importances_
            # Load the weights for the next iteration
            all_rf_weights["rf_weight{}".format(k + 1)] = feature_importances
        # Run the RITs
        # NOTE(review): this loop sits inside the k-loop at this indentation,
        # so it runs for every k and reads all_rf_weights["rf_weight{K}"]
        # before that key exists on early iterations. The RF(w(K)) comment
        # below suggests it is meant to run once, after the k-loop, with the
        # final weights -- confirm and dedent one level if so.
        for b in range(B):
            # Take a bootstrap sample from the training data
            # based on the specified user proportion
            X_train_rsmpl, y_rsmpl = resample(
                X_train, y_train, n_samples=n_samples)
            # Set up the weighted random forest
            # Using the weight from the (K-1)th iteration i.e. RF(w(K))
            rf_bootstrap = RandomForestClassifier(
                #CHECK: different number of trees to fit for bootstrap samples
                n_estimators=n_estimators)
            # Fit RF(w(K)) on the bootstrapped dataset
            rf_bootstrap.fit(
                X=X_train_rsmpl,
                y=y_rsmpl,
                feature_weight=all_rf_weights["rf_weight{}".format(K)])
            # All RF tree data
            # CHECK: why do we need y_train here?
            all_rf_tree_data = irf_utils.get_rf_tree_data(
                rf=rf_bootstrap,
                X_train=X_train_rsmpl,
                y_train=y_rsmpl,
                X_test=X_test,
                y_test=y_test)
            # Update the rf bootstrap output dictionary
            all_rf_bootstrap_output['rf_bootstrap{}'.format(b)] = all_rf_tree_data
            # Run RIT on the interaction rule set
            # CHECK - each of these variables needs to be passed into
            # the main run_rit function
            # NOTE(review): the parameters bin_class_type, random_state, M,
            # max_depth, noisy_split and num_splits accepted by run_rit are
            # currently ignored -- literal values are passed here instead.
            all_rit_tree_data = irf_utils.get_rit_tree_data(
                all_rf_tree_data=all_rf_tree_data,
                bin_class_type=1,
                random_state=12,
                M=4,
                max_depth=2,
                noisy_split=False,
                num_splits=2)
            # Update the rf bootstrap output dictionary
            # We will reference the RIT for a particular rf bootstrap
            # using the specific bootstrap id - consistent with the
            # rf bootstrap output data
            all_rit_bootstrap_output['rf_bootstrap{}'.format(
                b)] = all_rit_tree_data
    return all_rf_weights, all_rf_bootstrap_output, all_rit_bootstrap_output
all_rf_weights, all_rf_bootstrap_output, all_rit_bootstrap_output =\
run_rit(X_train=X_train,
X_test=X_test,
y_train=y_train,
y_test=y_test,
K=6,
n_estimators=20,
B=10,
random_state_classifier=2018,
propn_n_samples=0.2,
bin_class_type=1,
random_state=12,
M=4,
max_depth=2,
noisy_split=False,
num_splits=2)
all_rit_bootstrap_output['rf_bootstrap1']
all_rf_weights_1iter, all_rf_bootstrap_output_1iter, all_rit_bootstrap_output_1iter =\
run_rit(X_train=X_train,
X_test=X_test,
y_train=y_train,
y_test=y_test,
K=1,
n_estimators=1000,
B=10,
random_state_classifier=2018,
propn_n_samples=0.2,
bin_class_type=1,
random_state=12,
M=4,
max_depth=2,
noisy_split=False,
num_splits=2)
print(all_rf_weights_1iter['rf_weight1'])
rf.feature_importances_
rf_weight5 = np.ndarray.tolist(all_rf_weights['rf_weight1'])
rf_weight5
sorted([i for i, e in enumerate(rf_weight10) if e != 0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id='RV'></a>
Step2: A random variable can be called as a function to return its value for a particular outcome in the probability space.
Step3: If no function is specified in RV, the default is the identity function, f(x) = x.
Step4: Like the probability space defintions, the RV command only defines a random variable. Simulation tools like .sim(), .tabulate(), .filter(), and .count() work for RV just like for ProbabilitySpace.
Step5: Random variables can also be specified by user defined functions.
Step6: <a id='RVevent'></a>
Step7: <a id='plot'></a>
Step8: For continuous random variables, .plot() returns a histogram, which provides the frequencies of simulated values falling in interval "bins". By default, the vertical axis of the histogram is on the density scale, so that areas of the bars correspond to relative frequencies. (Bin frequencies (counts) can be obtained using the normalize=False option.)
Step9: See the section on Symbulate graphics for more details on plotting options and functionality.
Step10: By definition, a random variable must always be a function defined on a probability space. Specifying a random variable by specifying its distribution, as in X = RV(Binomial(n=5, p=0.5)), has the effect of defining the probability space to be the distribution of the random variable and the function defined on this space to be the identity (f(x) = x). However, it is more appropriate to think of such a specification as defining a random variable with the given distribution on an unspecified probability space through an unspecified function.
Step11: However, while $X$ and $Y$ are two different random variables, they do have the same distribution over many outcomes.
Step12: <a id='transform'></a>
Step13: Example. If $U$ has a Uniform(0,1) distribution then $-\log(1-U)$ has an Exponential(1) distribution.
Step14: Example. A user defined function with .apply()
Step15: <a id='mean'></a>
Step16: If x denotes simulated values, then either x.mean() or mean(x) will return the mean of the values.
Step17: Recall that output can be formatted using print statements.
Step18: <a id='var'></a>
Step19: Using .var() is equivalent to the following.
Step20: Standard deviation, .sd(), is the square root of the variance. (As with mean, both x.sd() or sd(x) are allowed.)
Step21: <a id='standardize'></a>
|
<ASSISTANT_TASK:>
Python Code:
from symbulate import *
%matplotlib inline
P = BoxModel([0, 1], size=5)
X = RV(P, sum)
outcome = (0, 1, 0, 1, 1)
X(outcome)
P = Normal(mean=0, sd=1)
X = RV(P)
X(-0.5)
P = BoxModel([0, 1], size=5)
X = RV(P, sum)
values = X.sim(10000)
values
values.tabulate(normalize=True)
values.count_gt(3) / 10000
n = 10
labels = list(range(n))
# remember, Python starts the index at 0, so the cards are labebeled 0, ..., 9
def number_matches(x):
    """Count the positions at which the shuffled deck `x` shows its own label.

    Uses the module-level `n` (deck size) and `labels` (expected card at
    each position); a "match" is x[i] == labels[i].
    """
    return sum(int(x[i] == labels[i]) for i in range(n))
P = BoxModel(labels, size=n, replace=False)
X = RV(P, number_matches)
X.sim(10000)
P = BoxModel([0, 1], size=5)
X = RV(P, sum)
(X > 3).sim(10000).tabulate()
P = BoxModel([0, 1], size=5)
X = RV(P, sum)
X.sim(10000).plot()
P = Normal(mean=0, sd=1)
X = RV(P)
X.sim(10000).plot()
X = RV(Binomial(n=5, p=0.5))
X.sim(10000).plot()
P = BoxModel([1, 0], size = 5)
X = RV(P, sum)
Y = 5 - X
(X == Y).sim(10000).tabulate()
X.sim(10000).plot()
Y.sim(10000).plot(jitter = True)
X = RV(Normal(mean=0, var=1))
Y = exp(X) # same as X.apply(exp)
Y.sim(10000).plot()
U = RV(Uniform(a=0, b=1))
X = -log(1 - U)
X.sim(10000).plot(alpha=0.5)
RV(Exponential(1)).sim(10000).plot(alpha=0.5)
def g(x):
    """Square of |x - 1|, with the distance clipped from above at 2
    (so the result never exceeds 4)."""
    dist = abs(x - 1)
    clipped = dist if dist < 2 else 2
    return clipped ** 2
X = RV(Exponential(rate = 1))
Y = X.apply(g)
Y.sim(10000).plot()
RV(Geometric(p=0.1)).sim(10000).mean()
x = RV(Geometric(p=0.1)).sim(10000)
mean(x)
print('An estimate of E(X) is: {:.2f}'.format(mean(x)))
X = RV(Exponential(rate=1/4))
x = X.sim(10000)
x.var()
mean((x - x.mean()) ** 2)
x.sd()
sqrt(x.var())
X = RV(Normal(mean=3, sd=2))
x = X.sim(10000)
x.mean(), x.sd()
z = x.standardize()
x.plot()
z.plot()
z.mean(), z.sd()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Gaussian Mixture Models and Expectation Maximisation in Shogun
Step2: Set up the model in Shogun
Step3: Sampling from mixture models
Step4: Evaluating densities in mixture Models
Step5: Density estimating with mixture models
Step6: Imagine you did not know the true generating process of this data. What would you think just looking at it? There are clearly at least two components (or clusters) that might have generated this data, but three also looks reasonable. So let us try to learn a Gaussian mixture model on those.
Step7: So far so good, now lets plot the density of this GMM using the code from above
Step8: It is also possible to access the individual components of the mixture distribution. In our case, we can for example draw 95% ellipses for each of the Gaussians using the method from above. We will do this (and more) below.
Step9: Clustering with mixture models
Step10: These are clusterings obtained via the true mixture model and the one learned via EM. There is a slight subtlety here
Step11: Note how the lower left and middle cluster are overlapping in the sense that points at their intersection have similar likelihoods. If you do not care at all about this and are just interested in a partitioning of the space, simply choose the maximum.
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
%matplotlib inline
# import all Shogun classes
from modshogun import *
from matplotlib.patches import Ellipse
# a tool for visualisation
def get_gaussian_ellipse_artist(mean, cov, nstd=1.96, color="red", linewidth=3):
    """Return an ellipse artist covering nstd standard deviations of the
    Gaussian specified by mean and covariance.

    Parameters
    ----------
    mean : array-like, shape (2,) -- centre of the ellipse
    cov : array-like, shape (2, 2) -- covariance matrix of the Gaussian
    nstd : float -- number of standard deviations the ellipse should cover
        (1.96 corresponds to a 95% region)
    color, linewidth : edge colour and line width of the matplotlib Ellipse
    """
    # compute eigenvalues/eigenvectors, ordered by decreasing eigenvalue
    vals, vecs = eigh(cov)
    order = vals.argsort()[::-1]
    vals, vecs = vals[order], vecs[:, order]
    # orientation of the principal axis, in degrees
    theta = numpy.degrees(arctan2(*vecs[:, 0][::-1]))
    # width and height are "full" widths, not radii
    width, height = 2 * nstd * sqrt(vals)
    e = Ellipse(xy=mean, width=width, height=height, angle=theta, \
                edgecolor=color, fill=False, linewidth=linewidth)
    return e
# create mixture of three Gaussians
num_components=3
num_max_samples=100
gmm=GMM(num_components)
dimension=2
# set means (TODO interface should be to construct mixture from individuals with set parameters)
means=zeros((num_components, dimension))
means[0]=[-5.0, -4.0]
means[1]=[7.0, 3.0]
means[2]=[0, 0.]
[gmm.set_nth_mean(means[i], i) for i in range(num_components)]
# set covariances
covs=zeros((num_components, dimension, dimension))
covs[0]=array([[2, 1.3],[.6, 3]])
covs[1]=array([[1.3, -0.8],[-0.8, 1.3]])
covs[2]=array([[2.5, .8],[0.8, 2.5]])
[gmm.set_nth_cov(covs[i],i) for i in range(num_components)]
# set mixture coefficients, these have to sum to one (TODO these should be initialised automatically)
weights=array([0.5, 0.3, 0.2])
gmm.set_coef(weights)
# now sample from each component seperately first, the from the joint model
hold(True)
colors=["red", "green", "blue"]
for i in range(num_components):
# draw a number of samples from current component and plot
num_samples=int(rand()*num_max_samples)+1
# emulate sampling from one component (TODO fix interface of GMM to handle this)
w=zeros(num_components)
w[i]=1.
gmm.set_coef(w)
# sample and plot (TODO fix interface to have loop within)
X=array([gmm.sample() for _ in range(num_samples)])
plot(X[:,0], X[:,1], "o", color=colors[i])
    # draw 95% ellipsoid for current component
gca().add_artist(get_gaussian_ellipse_artist(means[i], covs[i], color=colors[i]))
hold(False)
_=title("%dD Gaussian Mixture Model with %d components" % (dimension, num_components))
# since we used a hack to sample from each component
gmm.set_coef(weights)
# generate a grid over the full space and evaluate components PDF
resolution=100
Xs=linspace(-10,10, resolution)
Ys=linspace(-8,6, resolution)
pairs=asarray([(x,y) for x in Xs for y in Ys])
D=asarray([gmm.cluster(pairs[i])[3] for i in range(len(pairs))]).reshape(resolution,resolution)
figure(figsize=(18,5))
subplot(1,2,1)
pcolor(Xs,Ys,D)
xlim([-10,10])
ylim([-8,6])
title("Log-Likelihood of GMM")
subplot(1,2,2)
pcolor(Xs,Ys,exp(D))
xlim([-10,10])
ylim([-8,6])
_=title("Likelihood of GMM")
# sample and plot (TODO fix interface to have loop within)
X=array([gmm.sample() for _ in range(num_max_samples)])
plot(X[:,0], X[:,1], "o")
_=title("Samples from GMM")
def estimate_gmm(X, num_components):
    """Fit a Shogun GMM with `num_components` components to X via EM
    and return the trained model."""
    model = GMM(num_components)
    # Shogun stores samples column-wise, so hand over the transposed matrix
    model.set_features(RealFeatures(X.T))
    # learn the mixture parameters with expectation-maximisation
    model.train_em()
    return model
component_numbers=[2,3]
# plot true likelihood
D_true=asarray([gmm.cluster(pairs[i])[num_components] for i in range(len(pairs))]).reshape(resolution,resolution)
figure(figsize=(18,5))
subplot(1,len(component_numbers)+1,1)
pcolor(Xs,Ys,exp(D_true))
xlim([-10,10])
ylim([-8,6])
title("True likelihood")
for n in range(len(component_numbers)):
# TODO get rid of these hacks and offer nice interface from Shogun
# learn GMM with EM
gmm_est=estimate_gmm(X, component_numbers[n])
# evaluate at a grid of points
D_est=asarray([gmm_est.cluster(pairs[i])[component_numbers[n]] for i in range(len(pairs))]).reshape(resolution,resolution)
# visualise densities
subplot(1,len(component_numbers)+1,n+2)
pcolor(Xs,Ys,exp(D_est))
xlim([-10,10])
ylim([-8,6])
_=title("Estimated likelihood for EM with %d components"%component_numbers[n])
# function to draw ellipses for all components of a GMM
def visualise_gmm(gmm, color="blue"):
    """Overlay a 95% coverage ellipse on the current axes for every
    Gaussian component of the given Shogun GMM."""
    n_comp = gmm.get_num_components()
    for idx in range(n_comp):
        # downcast the generic distribution handle to a Gaussian
        comp = Gaussian.obtain_from_generic(gmm.get_component(idx))
        gca().add_artist(
            get_gaussian_ellipse_artist(comp.get_mean(), comp.get_cov(), color=color))
# multiple runs to illustrate random initialisation matters
for _ in range(3):
figure(figsize=(18,5))
subplot(1, len(component_numbers)+1, 1)
plot(X[:,0],X[:,1], 'o')
visualise_gmm(gmm_est, color="blue")
title("True components")
for i in range(len(component_numbers)):
gmm_est=estimate_gmm(X, component_numbers[i])
subplot(1, len(component_numbers)+1, i+2)
plot(X[:,0],X[:,1], 'o')
visualise_gmm(gmm_est, color=colors[i])
# TODO add a method to get likelihood of full model, retraining is inefficient
likelihood=gmm_est.train_em()
_=title("Estimated likelihood: %.2f (%d components)"%(likelihood,component_numbers[i]))
def cluster_and_visualise(gmm_est):
    """Hard-assign every training point in X to its most likely component of
    `gmm_est` and scatter-plot the points coloured by cluster.

    Relies on the module-level data X (n x 2) and the `colors` palette.
    Fix: the original read the notebook-global `gmm` for the component
    count; it now consistently uses the `gmm_est` argument.
    """
    k = gmm_est.get_num_components()
    # TODO another hack here: Shogun should allow to pass multiple points and only return the index
    # cluster(x) returns per-component scores followed by the joint value,
    # so the argmax is taken over the first k entries only.
    clusters = asarray([argmax(gmm_est.cluster(x)[:k]) for x in X])
    # visualise points by cluster
    hold(True)
    for i in range(k):
        members = clusters == i
        plot(X[members, 0], X[members, 1], 'o', color=colors[i])
    hold(False)
# learn gmm again
gmm_est=estimate_gmm(X, num_components)
figure(figsize=(18,5))
subplot(121)
cluster_and_visualise(gmm)
title("Clustering under true GMM")
subplot(122)
cluster_and_visualise(gmm_est)
_=title("Clustering under estimated GMM")
figure(figsize=(18,5))
for comp_idx in range(num_components):
subplot(1,num_components,comp_idx+1)
# evaluated likelihood under current component
# TODO Shogun should do the loop and allow to specify component indices to evaluate pdf for
# TODO distribution interface should be the same everywhere
component=Gaussian.obtain_from_generic(gmm.get_component(comp_idx))
cluster_likelihoods=asarray([component.compute_PDF(X[i]) for i in range(len(X))])
# normalise
cluster_likelihoods-=cluster_likelihoods.min()
cluster_likelihoods/=cluster_likelihoods.max()
# plot, coloured by likelihood value
cm=get_cmap("jet")
hold(True)
for j in range(len(X)):
color = cm(cluster_likelihoods[j])
plot(X[j,0], X[j,1] ,"o", color=color)
hold(False)
title("Data coloured by likelihood for component %d" % comp_idx)
# compute cluster index for every point in space
D_est=asarray([gmm_est.cluster(pairs[i])[:num_components].argmax() for i in range(len(pairs))]).reshape(resolution,resolution)
# visualise clustering
cluster_and_visualise(gmm_est)
# visualise space partitioning
hold(True)
pcolor(Xs,Ys,D_est)
hold(False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sample Application
Step2: Simple DataTable
Step4: Add Filtering
Step5: Links
|
<ASSISTANT_TASK:>
Python Code:
from bokeh.io import output_file
output_file('/tmp/bokeh_notebook.html')
from pandas_datareader import wb
indicators_df = wb.get_indicators()
indicators_df['sourceOrganization'] = indicators_df['sourceOrganization'].str.decode("utf-8")
indicators_df.head()
from bokeh.io import show, vform
from bokeh.models import ColumnDataSource, CustomJS
from bokeh.models.layouts import HBox
from bokeh.models.widgets import DataTable, TableColumn, StringFormatter, DateFormatter, Select
import pandas as pd
table_columns = [
TableColumn(field='id', title='ID'),
TableColumn(field='name', title='Name'),
TableColumn(field='source', title='Source'),
TableColumn(field='sourceNote', title='Source Note'),
TableColumn(field='sourceOrganization', title='Source Organization'),
TableColumn(field='topics', title='Topics'),
]
column_names = [tc.field for tc in table_columns]
table_source = ColumnDataSource(indicators_df[column_names])
data_table = DataTable(source=table_source, columns=table_columns, height=400, editable=False)
show(vform(data_table))
# two widgets
source_list = ['ALL'] + sorted(indicators_df['source'].unique().tolist())
source_select = Select(title="Source:", value=source_list[0], options=source_list)
topic_list = ['ALL'] + sorted(set([topic.strip() for topics in indicators_df['topics'].unique().tolist()
for topic in topics.split(';') if topic.strip()]))
topic_select = Select(title="Topic:", value=topic_list[0], options=topic_list)
# make a copy of the original data - "immutable"
original_table_source = ColumnDataSource(indicators_df[column_names])
arg_dct = dict(
source=table_source,
original_source=original_table_source,
source_select=source_select,
topic_select=topic_select,
target_obj=data_table
)
# filtering done in JS
callback_code =
var data = source.get('data');
var original_data = original_source.get('data');
var source_name = source_select.get('value');
var topic_name = topic_select.get('value');
// now construct the new data object based on the filtered values
for (var key in original_data) {
data[key] = [];
for (var i = 0; i < original_data["id"].length; ++i) {
if ((source_name === "ALL" || original_data["source"][i] === source_name) &&
(topic_name === "ALL" || original_data["topics"][i].indexOf(topic_name) > -1)) {
data[key].push(original_data[key][i]);
}
}
}
target_obj.trigger('change');
source.trigger('change');
generic_callback = CustomJS(args=arg_dct, code=callback_code)
source_select.callback = generic_callback
topic_select.callback = generic_callback
filter_widgets = HBox(children=[source_select, topic_select])
show(vform(filter_widgets, data_table))
from bokeh.models.widgets.tables import HTMLTemplateFormatter
try:
from urllib import quote
except ImportError as e:
from urllib.parse import quote
name_template_str = '<a target="_blank" href="<%= url %>"><%= value %></a>'
table_columns = [
TableColumn(field='id', title='ID'),
TableColumn(field='name', title='Name', formatter=HTMLTemplateFormatter(template=name_template_str)),
TableColumn(field='source', title='Source'),
TableColumn(field='sourceNote', title='Source Note'),
TableColumn(field='sourceOrganization', title='Source Organization'),
TableColumn(field='topics', title='Topics'),
]
column_names = [tc.field for tc in table_columns]
df = indicators_df.copy()
df['url'] = df['name'].apply(lambda s: 'http://search.worldbank.org/all?qterm={term}&language=EN'.format(term=quote(s)))
table_source = ColumnDataSource(df)
data_table = DataTable(source=table_source, columns=table_columns, height=400, editable=False)
show(vform(data_table))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: So in conclusion…
Step4: Python does not have interfaces. If it did, it would look something like this. Interfaces can be imitated with a class.
Step5: If one tries to make an instance of AbstractShape and calls obj.draw(), a NotImplementedError is raised because the shape is abstract. It has no idea what to draw.
Step8: If one tries to make an instance of Circle and calls circle.draw(), a NotImplementedError is not raised because the shape is no longer abstract.
Step9: Create a list of shapes by instantiating Circle and Square.
Step10: Invoke bound draw method on each shape instance.
|
<ASSISTANT_TASK:>
Python Code:
%%HTML
<div style="background-color:#d9edf7;color:#31708;border-color:#bce8f1;padding: 15px;margin-bottom: 20px;border: 1px; border-radius:4px;">
<strong>psittacism: </strong> <p>automatic speech without thought of the meaning of the words spoken</p>
<p>New Latin psittacismus, from Latin psittacus parrot + -ismus -ism</p>
<citation>Psittacism. (n.d.). Retrieved August 24, 2016, from http://www.merriam-webster.com/dictionary/psittacism</citation>
</div>
%%HTML
<a title="By John Moose (originally posted to Flickr as Moluccan Cockatoo) [CC BY 2.0 (http://creativecommons. org/licenses/by/2.0)], via Wikimedia Commons" href="https://commons.wikimedia.org/wiki/File%3ACacatua_moluccensis_-Cincinnati_Zoo-8a.jpg"><img width="256" alt="Cacatua moluccensis -Cincinnati Zoo-8a" src="https://upload.wikimedia.org/wikipedia/commons/thumb/b/b8/Cacatua_moluccensis_-Cincinnati_Zoo-8a.jpg/256px-Cacatua_moluccensis_-Cincinnati_Zoo-8a.jpg"/></a>
import inspect
class AbstractShape:
    """Abstract (base) class for shapes."""

    def draw(self, ):
        """Draw shape.

        Raises
        ------
        NotImplementedError
            Always; concrete subclasses must override draw().
        """
        # The docstring lines had lost their triple quotes in the original,
        # which made the class body a SyntaxError; restored here.
        error_message = "Method {} not defined. Abstract shapes have no idea what to draw!"\
            .format(self.draw)
        raise NotImplementedError(error_message)
try:
shape = AbstractShape()
shape.draw()
except NotImplementedError as e:
print(Exception(e))
class Circle(AbstractShape):
    """Circle implements AbstractShape."""

    def __init__(self, ):
        # Print the method resolution order so readers can see the inheritance chain.
        print("My ancestors are {}".format(inspect.getmro(Circle)))

    def draw(self, ):
        """Draw a circle (concrete override of AbstractShape.draw)."""
        print("An instance of {} {} is drawing itself via {}."\
            .format(super(), self, self.draw))
        print("<circle appears>")
circle = Circle()
circle.draw()
class Square(AbstractShape):
    """Square implements AbstractShape."""

    def __init__(self, ):
        # Print the method resolution order so readers can see the inheritance chain.
        print("My ancestors are {}".format(inspect.getmro(Square)))

    def draw(self, ):
        """Draw a square (concrete override of AbstractShape.draw)."""
        print("An instance of {} {} is drawing itself via {}."\
            .format(super(), self, self.draw))
        print("<square appears>")
square = Square()
square.draw()
shapes = [class_() for class_ in (Circle, Square)]
print(shapes)
for shape in shapes:
shape.draw()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Functions
Step3: We see that the line add(1, 2) is outside the function and so is executed. We can also call the function repeatedly
Step4: The lengthy comment at the start of the function is very useful to remind yourself later what the function should do. You can see this information by typing
Step5: You can also view this in spyder by typing add in the Object window of the Help tab in the top right.
Step7: if statements and flow control
Step8: We see that the Visual Basic If statement becomes the lower case if, and the ElseIf is contracted to elif. The condition (in this case!) compares a variable, count, to a number, using the equality comparison ==. Once again, as in the case of functions, the line containing the if definition is ended with a colon (
Step9: Loops
Step10: This is tedious and there's a high chance of errors.
Step11: The syntax has similarities to the syntax for functions. The line defining the loop starts with for, specifies the values that n takes, and ends with a colon. The code that is executed inside the loop is indented.
Step12: We see that
Step13: This is very often used in Python code
Step14: We see that to access individual entries we use square brackets and the number of the entry, starting from $0$. All Python tuples and lists start from $0$. To check that it cannot be modified
Step15: We can use slicing to access many entries at once
Step16: As with the range function, the notation <start>
Step17: Lists
Step18: The same slicing notation can be used, and now can be used to assignment
Step19: Crucially, lists and tuples can contain anything. As with loops, there is no restriction on types, and things can be nested
Step20: Dictionaries
Step21: As there is no order we access dictionaries using the key. To loop over a dictionary, we take advantage of Python's loose iteration rules
Step22: There is a shortcut to allow you to get both key and value in one go
Step23: Exercise
Step24: Numpy arrays
Step25: Accessing elements of numpy arrays is very similar to accessing elements of lists, but with slightly less typing. To access elements from an n-dimensional list, we have to use multiple square brackets, e.g. l[0][4][7][8]. For a numpy array, we separate the indices using a comma
Step26: Let's say we now want to square every element of the array. For this 2d list, we would need a for loop
Step27: Note that here we used the function deepcopy from the copy module to copy the list l. If we had simply used squared = l, then when we assigned the elements of squared new values, this would also have changed the values in l. This is in contrast to the simple variables we saw before, where changing the value of one will leave the values of others unchanged.
Step28: Numpy has a range of array manipulation routines for rearranging and manipulating elements, such as those below.
Step29: If you've used Matlab before, you may be familiar with logical indexing. This is a way of accessing elements of a array that satisfy some criteria, e.g. all the elements which are greater than 0. We can also do this with numpy arrays using boolean array indexing
|
<ASSISTANT_TASK:>
Python Code:
def add(x, y):
    """Add two numbers.

    Parameters
    ----------
    x : float
        First input
    y : float
        Second input

    Returns
    -------
    float
        The sum x + y.
    """
    # The docstring had lost its triple-quote delimiters in the original,
    # leaving bare prose lines in the body (a SyntaxError); restored here.
    return x + y
add(1, 2)
print(add(3, 4))
print(add(10.61, 5.99))
help(add)
import script2
script2.add(1, 2)
# Choose a message based on how many items there are.
count = 0
if count == 0:
    message = "There are no items."
elif count == 1:
    message = "There is 1 item."
else:
    # str(count) is required: concatenating an int to a str raises TypeError,
    # and a space was missing after "are" in the original.
    message = "There are " + str(count) + " items."
print(message)
def fibonacci(n):
    """Return the n-th Fibonacci number, with F_1 = F_2 = 1.

    Iterative O(n) implementation; the original naive double recursion
    recomputed subproblems and ran in exponential time. Defined for n >= 1.
    """
    a, b = 1, 1
    for _ in range(n - 2):
        a, b = b, a + b
    return b
print('F_1 = ', fibonacci(1))
print('F_2 = ', fibonacci(2))
print('F_5 = ', fibonacci(5))
print('F_10 = ', fibonacci(10))
print(add(3, 1))
print(add(3, 2))
print(add(3, 3))
print(add(3, 4))
print(add(3, 5))
for n in 1, 2, 3, 4, 5:
print(add(3, n))
print("Loop has ended")
for n in range(1, 6):
print("n =", n)
for m in range(3):
print("m =", m)
for k in range(2, 7, 2):
print("k =", k)
for thing in 1, 2.5, "hello", add:
print("thing is ", thing)
t1 = (0, 1, 2, 3, 4, 5)
print(t1[0])
print(t1[3])
t1[0] = 1
print(t1[1:4])
print(t1[-1])
l1 = [0, 1, 2, 3, 4, 5]
print(l1[3])
l1[3] = 7
print(l1[3])
l1.append(6)
print(l1)
l1[0:2] = l1[4:6]
print(l1)
l2 = [0, 1.2, "hello", ["a", 3, 4.5], (0, (1.1, 2.3, 4))]
print(l2[1])
print(l2[3][0])
d1 = {"omega": 1.0, "Gamma": 5.7, "N": 100}
print(d1["Gamma"])
for key in d1:
print("Key is", key, "value is", d1[key])
for key, value in d1.items():
print("Key is", key, "value is", value)
boaty = {'first name' : 'Boaty',
'last name' : 'McBoatface',
'student ID' : 123456,
'project' : 'Surveying the arctic ocean'}
def f_name(d):
    """Print a greeting built from the 'first name' and 'last name' keys of `d`."""
    first = d['first name']
    last = d['last name']
    print("My name is {} {}".format(first, last))
def f_project(d):
    """Print which project the student described by record `d` is doing."""
    sid = d['student ID']
    proj = d['project']
    print("Student {} is doing project {}".format(sid, proj))
f_name(boaty)
f_project(boaty)
import numpy
# python list
l = [[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]]
a = numpy.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]])
print('list l = {}'.format(l))
print('numpy array a = {}'.format(a))
print(l[1][2])
print(a[1,2])
import copy
squared = copy.deepcopy(l)
for i in range(3):
for j in range(3):
squared[i][j] = l[i][j]**2
print(squared)
print(a**2)
# transpose
a.T
# reshape
numpy.reshape(a, (1,9))
# stack arrays horizontally
numpy.hstack((a,a,a))
a[a > 5]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: datetime.datetime 클래스
Step2: Instance Attributes 속성
Step3: 문자열 변환
Step4: format 기호
Step5: datetime, date, time 변환
Step6: timedelta와 날짜시간 연산
Step7: timedelta 클래스
Step8: time 패키지
Step9: time 패키지의 시간 표시
Step10: pytz 패키지
Step11: datetime 객체에 시간대 정보 넣기
Step12: 타시간대로 변환
Step13: http
|
<ASSISTANT_TASK:>
Python Code:
import datetime
dt = datetime.datetime.now()
dt
type(dt)
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, dt.tzinfo
dt.weekday() # {0:월, 1:화, 2:수, 3:목, 4:금, 5:토, 6:일}
dt1 = datetime.datetime.strptime("2015-12-31 11:32", "%Y-%m-%d %H:%M")
dt1
dt1.strftime("%d/%m/%y")
dt1.strftime("%A %d. %B %Y")
s = dt1.strftime(u"%Y년 %m월 %d일 %H시 %M분 %S초".encode("utf-8"))
print(s)
dt = datetime.datetime.now()
dt
dt.date(), dt.time()
d = datetime.date(2015, 12, 31)
d
t = datetime.time(11, 31, 29)
t
datetime.datetime.combine(d, t)
dt1 = datetime.datetime(2016, 2, 19, 14)
dt2 = datetime.datetime(2016, 1, 2, 13)
td = dt1 - dt2
td
td.days, td.seconds, td.microseconds
td.total_seconds()
import time
print("start...")
time.sleep(1)
print(1)
time.sleep(1)
print(2)
time.sleep(1)
print(3)
time.sleep(1)
print(4)
time.sleep(1)
print("finish!")
time.time()
ts = time.localtime()
ts
time.mktime(ts)
import pytz
seoul = pytz.timezone("Asia/Seoul")
t1 = datetime.datetime.now()
t1
lt1 = seoul.localize(t1)
lt1
t2 = datetime.datetime.utcnow()
t2
lt2 = pytz.utc.localize(t2)
lt2
lt2 = t2.replace(tzinfo=pytz.utc)
lt2
t1 = datetime.datetime.now()
lt1 = seoul.localize(t1)
lt3 = lt1.astimezone(pytz.timezone("US/Eastern"))
lt3
from dateutil.parser import parse
parse('2016-04-16')
parse("Apr 16, 2016 04:05:32 PM")
parse('6/7/2016')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Distributed training with Keras
Step2: Download the dataset
Step3: Define the distribution strategy
Step4: Set up the input pipeline
Step5: Define a function that normalizes the image pixel values from the [0, 255] range to the [0, 1] range (feature scaling)
Step6: Apply this scale function to the training and test data, and then use the tf.data.Dataset APIs to shuffle the training data (Dataset.shuffle), and batch it (Dataset.batch). Notice that you are also keeping an in-memory cache of the training data to improve performance (Dataset.cache).
Step7: Create the model
Step8: Define the callbacks
Step9: Train and evaluate
Step10: Check for saved checkpoints
Step11: To check how well the model performs, load the latest checkpoint and call Model.evaluate on the test data
Step12: To visualize the output, launch TensorBoard and view the logs
Step13: <!-- <img class="tfo-display-only-on-site" src="images/tensorboard_distributed_training_with_keras.png"/> -->
Step14: Export to SavedModel
Step15: Now, load the model without Strategy.scope
Step16: Load the model with Strategy.scope
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow_datasets as tfds
import tensorflow as tf
import os
# Load the TensorBoard notebook extension.
%load_ext tensorboard
print(tf.__version__)
datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True)
mnist_train, mnist_test = datasets['train'], datasets['test']
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
# You can also do info.splits.total_num_examples to get the total
# number of examples in the dataset.
num_train_examples = info.splits['train'].num_examples
num_test_examples = info.splits['test'].num_examples
BUFFER_SIZE = 10000
BATCH_SIZE_PER_REPLICA = 64
BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
def scale(image, label):
    """Normalise pixel values from the [0, 255] range to [0, 1]; pass the label through."""
    normalised = tf.cast(image, tf.float32) / 255
    return normalised, label
train_dataset = mnist_train.map(scale).cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
eval_dataset = mnist_test.map(scale).batch(BATCH_SIZE)
with strategy.scope():
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10)
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(),
metrics=['accuracy'])
# Define the checkpoint directory to store the checkpoints.
checkpoint_dir = './training_checkpoints'
# Define the name of the checkpoint files.
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")
# Define a function for decaying the learning rate.
# You can define any decay function you need.
def decay(epoch):
    """Piecewise-constant learning-rate schedule.

    Returns 1e-3 for epochs 0-2, 1e-4 for epochs 3-6, and 1e-5 from epoch 7 on.
    """
    if epoch >= 7:
        return 1e-5
    if epoch >= 3:
        return 1e-4
    return 1e-3
# Define a callback for printing the learning rate at the end of each epoch.
# Define a callback for printing the learning rate at the end of each epoch.
class PrintLR(tf.keras.callbacks.Callback):
    """Keras callback that prints the optimizer's learning rate after every epoch."""

    def on_epoch_end(self, epoch, logs=None):
        # Use self.model (attached by Keras when the callback is registered)
        # rather than the notebook-global `model`, so the callback is reusable.
        print('\nLearning rate for epoch {} is {}'.format(epoch + 1, self.model.optimizer.lr.numpy()))
# Put all the callbacks together.
callbacks = [
tf.keras.callbacks.TensorBoard(log_dir='./logs'),
tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix,
save_weights_only=True),
tf.keras.callbacks.LearningRateScheduler(decay),
PrintLR()
]
EPOCHS = 12
model.fit(train_dataset, epochs=EPOCHS, callbacks=callbacks)
# Check the checkpoint directory.
!ls {checkpoint_dir}
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
eval_loss, eval_acc = model.evaluate(eval_dataset)
print('Eval loss: {}, Eval accuracy: {}'.format(eval_loss, eval_acc))
%tensorboard --logdir=logs
!ls -sh ./logs
path = 'saved_model/'
model.save(path, save_format='tf')
unreplicated_model = tf.keras.models.load_model(path)
unreplicated_model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(),
metrics=['accuracy'])
eval_loss, eval_acc = unreplicated_model.evaluate(eval_dataset)
print('Eval loss: {}, Eval Accuracy: {}'.format(eval_loss, eval_acc))
with strategy.scope():
replicated_model = tf.keras.models.load_model(path)
replicated_model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(),
metrics=['accuracy'])
eval_loss, eval_acc = replicated_model.evaluate(eval_dataset)
print ('Eval loss: {}, Eval Accuracy: {}'.format(eval_loss, eval_acc))
<END_TASK>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.