<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_induced_power
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
label_name = 'Aud-rh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
tmin, tmax, event_id = -0.2, 0.5, 2
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Picks MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
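# (Note: MNE interprets these rejection thresholds in SI units -- T/m for
# gradiometers, T for magnetometers, and V for EOG.)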
# Load epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject,
preload=True)
# Compute a source estimate per frequency band including and excluding the
# evoked response
freqs = np.arange(7, 30, 2) # define frequencies of interest
label = mne.read_label(fname_label)
n_cycles = freqs / 3.  # use a different number of cycles per frequency
# subtract the evoked response in order to exclude evoked activity
epochs_induced = epochs.copy().subtract_evoked()
plt.close('all')
for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
                                              ['evoked + induced',
                                               'induced only'])):
    # compute the source space power and the inter-trial coherence
    power, itc = source_induced_power(
        this_epochs, inverse_operator, freqs, label, baseline=(-0.1, 0),
        baseline_mode='percent', n_cycles=n_cycles, n_jobs=1)

    power = np.mean(power, axis=0)  # average over sources
    itc = np.mean(itc, axis=0)  # average over sources
    times = epochs.times

    ##########################################################################
    # View time-frequency plots
    plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)
    plt.subplot(2, 2, 2 * ii + 1)
    plt.imshow(20 * power,
               extent=[times[0], times[-1], freqs[0], freqs[-1]],
               aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r')
    plt.xlabel('Time (s)')
    plt.ylabel('Frequency (Hz)')
    plt.title('Power (%s)' % title)
    plt.colorbar()

    plt.subplot(2, 2, 2 * ii + 2)
    plt.imshow(itc,
               extent=[times[0], times[-1], freqs[0], freqs[-1]],
               aspect='auto', origin='lower', vmin=0, vmax=0.7,
               cmap='RdBu_r')
    plt.xlabel('Time (s)')
    plt.ylabel('Frequency (Hz)')
    plt.title('ITC (%s)' % title)
    plt.colorbar()
plt.show()
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.DataFrame({
'Column1': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'Column2': [4, 3, 6, 8, 3, 4, 1, 4, 3],
'Column3': [7, 3, 3, 1, 2, 2, 3, 2, 7],
'Column4': [9, 8, 7, 6, 5, 4, 3, 2, 1],
'Column5': [1, 1, 1, 1, 1, 1, 1, 1, 1]})
def get_relation(df, col1, col2):
    # The largest group size tells us whether a column's values are unique:
    # a maximum count of 1 means no value occurs more than once.
    first_max = df[[col1, col2]].groupby(col1).count().max().iloc[0]
    second_max = df[[col1, col2]].groupby(col2).count().max().iloc[0]
    if first_max == 1:
        if second_max == 1:
            return 'one-to-one'
        else:
            return 'one-to-many'
    else:
        if second_max == 1:
            return 'many-to-one'
        else:
            return 'many-to-many'
def g(df):
    result = pd.DataFrame(index=df.columns, columns=df.columns)
    for col_i in df.columns:
        for col_j in df.columns:
            if col_i == col_j:
                continue
            result.loc[col_i, col_j] = get_relation(df, col_i, col_j)
    return result
result = g(df.copy())
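# Quick sanity check (illustrative, not part of the original task): print the
# relationship matrix. 'Column5' holds a single repeated value, so it should
# land on the "many" side of every relation it appears in.
print(result)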
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Line plot of sunspot data
Step2: Use np.loadtxt to read the data into a NumPy array called data. Then create two new 1d NumPy arrays named years and ssc that have the sequence of year and sunspot counts.
Step3: Make a line plot showing the sunspot count as a function of year.
Step4: Describe the choices you have made in building this visualization and how they make it effective.
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import os
assert os.path.isfile('yearssn.dat')
# YOUR CODE HERE
data = np.loadtxt("yearssn.dat")
year = data[:,0]
ssc = data[:,1]
print(year)
print(ssc)
assert len(year)==315
assert year.dtype==np.dtype(float)
assert len(ssc)==315
assert ssc.dtype==np.dtype(float)
# YOUR CODE HERE
#http://matplotlib.org/examples/pylab_examples/spine_placement_demo.html
fig = plt.figure(figsize=(12,1))
ax = fig.add_subplot(1, 1, 1)
ax.set_title("Sunspot Activity")
ax.plot(year, ssc)
plt.xlabel("Year")
plt.ylabel("Sunspot Count")
ax.grid(True)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
assert True # leave for grading
print(year[0:100])
print(len(year[0:100]))
print(len(ssc[0:100]))
# YOUR CODE HERE
fig, ax = plt.subplots(4, 1, figsize=(12,12))
plt.sca(ax[0])
plt.plot(year[0:100], ssc[0:100])
plt.ylabel("Sunspots")
plt.xlabel("Year")
plt.sca(ax[1])
plt.plot(year[100:200], ssc[100:200])
plt.ylabel("Sunspots")
plt.xlabel("Year")
plt.sca(ax[2])
plt.plot(year[200:300], ssc[200:300])
plt.ylabel("Sunspots")
plt.xlabel("Year")
plt.sca(ax[3])
plt.plot(year[300:], ssc[300:])  # use [300:] so the final year is not dropped
plt.ylabel("Sunspots")
plt.xlabel("Year")
plt.xlim([2000, 2100])
assert True # leave for grading
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: It contains the package and class name as well as various measures that describe the coverage.
Step2: Nice, Pandas recognizes the format of the CSV file automagically!
Step3: Let's add some custom measures. For the size of a class, we simply add the missed and covered line counts together. Based upon this, we can also calculate the ratio between covered lines and all lines in our code. With this information, we can see which features are used (high values) and which are not in use (low values).
Step4: That's all we need! Let's get to the interesting stuff!
Step5: By grouping all the classes by their feature and by forming the average of all line covered ratios, we can approximate the feature usage.
Step6: We can see that the "Vet" feature isn't used very often. If we want to know which classes are affected, we can filter depending on the corresponding feature.
Step7: Based on this list, we can approximate how much code we could save if we deleted that feature, i.e. all of its classes.
Step8: Result
Step9: Looks good! Then, same game as before, we simply group the classes by the new information.
Step10: There is one part that isn't used at all
Step11: Again, we also calculate these classes' share of our whole application.
Step12: Result
Step13: 42! I mean, this is the answer! The answer to (keeping legacy systems a-) life
Step14: OK, let's transform our data frame into the hierarchical JSON format "flare" that D3 needs to visualize the bubble chart. We just map our columns to the more generic names that D3 uses later on to display the various measures.
Step15: Finally, we save the produced JSON data into a file for later displaying.
<ASSISTANT_TASK:>
Python Code:
JACOCO_CSV_FILE = r'input/spring-petclinic/jacoco.csv'
with open(JACOCO_CSV_FILE) as log:
    for line in log.readlines()[:4]:
        print(line, end='')
import pandas as pd
coverage = pd.read_csv(JACOCO_CSV_FILE)
coverage.head(3)
coverage = coverage[['PACKAGE', 'CLASS', 'LINE_MISSED', 'LINE_COVERED']]
coverage.head()
coverage['line_size'] = coverage['LINE_MISSED'] + coverage['LINE_COVERED']
coverage['line_covered_ratio'] = coverage['LINE_COVERED'] / coverage['line_size']
coverage.head()
features = ['Owner', 'Pet', 'Visit', 'Vet', 'Specialty', 'Clinic']
for feature in features:
    coverage.loc[coverage['CLASS'].str.contains(feature), 'feature'] = feature
coverage.loc[coverage['feature'].isnull(), 'feature'] = "Framework"
coverage[['CLASS', 'feature']].head()
feature_usage = coverage.groupby('feature')[['line_covered_ratio']].mean().sort_values(by='line_covered_ratio')
feature_usage
classes_to_delete_by_feature = coverage[coverage['feature'] == feature_usage.index[0]][['PACKAGE', 'CLASS', 'line_covered_ratio', 'line_size']]
classes_to_delete_by_feature
classes_to_delete_by_feature['line_size'].sum() / coverage['line_size'].sum()
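# The last component of the package name encodes the technical layer (e.g.
# "model" or "web" in Spring PetClinic), so we treat it as the "technology".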
coverage['technology'] = coverage['PACKAGE'].str.split(".").str.get(-1)
coverage[['PACKAGE', 'technology']].head()
technology_usage = coverage.groupby('technology')[['line_covered_ratio']].mean().sort_values(by='line_covered_ratio')
technology_usage
classes_to_delete_by_technology = coverage[coverage['technology'] == technology_usage.index[0]][['PACKAGE', 'CLASS', 'line_covered_ratio', 'line_size']]
classes_to_delete_by_technology
classes_to_delete_by_technology['line_size'].sum() / coverage['line_size'].sum()
print("{:.0%}".format(
(classes_to_delete_by_feature['line_size'].sum() +
classes_to_delete_by_technology['line_size'].sum()) /
coverage['line_size'].sum()))
import matplotlib.cm as cm
import matplotlib.colors
def assign_rgb_color(value):
    color_code = cm.coolwarm(value)
    return matplotlib.colors.rgb2hex(color_code)
plot_data = coverage.copy()
plot_data['color'] = plot_data['line_covered_ratio'].apply(assign_rgb_color)
plot_data[['line_covered_ratio', 'color']].head(5)
import json
def create_flare_json(data,
                      column_name_with_hierarchical_data,
                      separator=".",
                      name_column="name",
                      size_column="size",
                      color_column="color"):
    json_data = {}
    json_data['name'] = 'flare'
    json_data['children'] = []

    for _, series in data.iterrows():
        hierarchical_data = series[column_name_with_hierarchical_data]

        last_children = None
        children = json_data['children']

        for part in hierarchical_data.split(separator):
            entry = None

            # build up the tree
            for child in children:
                if "name" in child and child["name"] == part:
                    entry = child

            if not entry:
                entry = {}
                children.append(entry)

            # create a new entry section
            entry['name'] = part

            if 'children' not in entry:
                entry['children'] = []

            children = entry['children']
            last_children = children

        # add data to leaf node
        last_children.append({
            'name': series[name_column],
            'size': series[size_column],
            'color': series[color_column]
        })

    return json_data
json_data = create_flare_json(plot_data, "PACKAGE", ".", "CLASS", "line_size")
print(json.dumps(json_data, indent=3)[0:1000])
FLARE_JSON_FILE = r'vis/flare.json'
with open(FLARE_JSON_FILE, mode='w', encoding='utf-8') as json_file:
    json_file.write(json.dumps(json_data, indent=3))
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The image in this example is an IFU science exposure taken with the GMOS Hamamatsu CCD
Step2: limpia_cosmicos calls L.A. Cosmic from P. van Dokkum
<ASSISTANT_TASK:>
Python Code:
# Astro-example
####################
# Cosmic-ray cleaning of an image made up of 12 detectors
# Uses L.A. Cosmic from P. van Dokkum
# Requires the Python version of la_cosmic
# http://www.astro.yale.edu/dokkum/lacosmic/
# It also uses Parallel Python (pp)
##################
#
# la_cosmic must be in the same directory
#
#
import sys, os, string
import astropy
from astropy.io import fits
import numpy as np
import pyfits
import scipy
from scipy import ndimage
# IMPORTING La Cosmic
import cosmics
# time
import time
# Parallel Python
import pp
import matplotlib.pyplot as plt
%matplotlib inline
imagen_in = "python_club_gemini.fits"
def separa_Amps(imagen_a_corregir):
    #header = pyfits.getheader(imagen_a_corregir)
    # Creating arrays
    imagen = []
    header_amp = []
    Amp = []
    for i in range(1, 13):
        print(i)
        # Reading each amp.
        # and creating 12 images, where each one corresponds to an amplifier
        header_amp.append(pyfits.getheader(imagen_a_corregir, 'sci', i))
        imagen.append(pyfits.getdata(imagen_a_corregir, 'sci', i))
        Amp.append(str(imagen_a_corregir[:-5] + "_Amp_" + str(i) + ".fits"))
    for k in range(0, 12):
        print("writing", Amp[k])
        pyfits.writeto(Amp[k], imagen[k], header_amp[k])
    return
print "Splittings Amps"
start_time1 = time.time()
separa_Amps(imagen_in)
start_time2 = time.time()
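# Illustrative addition (not in the original script): report how long the
# amplifier split took, using the two timestamps recorded above.
print("Amp splitting took", start_time2 - start_time1, "s")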
image_data = pyfits.getdata("python_club_gemini_Amp_7.fits")
print(type(image_data))
print(image_data.shape)
from matplotlib.colors import LogNorm
plt.imshow(image_data, cmap="gray", aspect='auto', norm=LogNorm(vmin=30.08, vmax=1000.1, clip=True))
plt.colorbar()
def limpia_cosmicos(imagen_a_corregir, N_Amp):
    # imagen_a_corregir=imagen_in
    # la_cosmic routine
    import cosmics
    array, header = cosmics.fromfits(str(imagen_a_corregir[:-5] + "_Amp_" + str(N_Amp) + ".fits"))
    gain_h = header["GAIN"]  # READING GAIN AND NOISE FROM HEADERS
    rnoise = header["RDNOISE"]
    print("READING GAIN AND NOISE ", gain_h, rnoise, str(imagen_a_corregir[:-5] + "_Amp_" + str(N_Amp) + ".fits"))
    # Edit this for SCI or STD or Images
    c = cosmics.cosmicsimage(array, gain=gain_h, readnoise=rnoise, sigclip=3, sigfrac=1, objlim=2.0)
    # There are other options, check the la_cosmic manual
    # Run the full artillery; 7 iterations is enough:
    c.run(maxiter=7)
    # Write the cleaned image into a new FITS file, preserving the original header:
    cleaned = str(imagen_a_corregir[:-5] + "_CC2_" + str(N_Amp) + ".fits")
    cosmics.tofits(cleaned, c.cleanarray, header)
start_time1 = time.time()
start_time2 = time.time()
# print("iterations for cosmic rejection...")
ppservers = ()
ncpus = 4
# Creates a job server with ncpus workers
job_server = pp.Server(ncpus, ppservers=ppservers)
print("Starting pp with", job_server.get_ncpus(), "workers aka CPUs")
# Brute force...
Amplific = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)
jobs = [(amp, job_server.submit(limpia_cosmicos, (imagen_in, amp,))) for amp in Amplific]
for amp, job in jobs:
    print("executing job N", amp, "is", job())
job_server.print_stats()
print("Total time elapsed: ", time.time() - start_time2, "s")
image_data = pyfits.getdata("python_club_gemini_CC2_7.fits")
print(type(image_data))
print(image_data.shape)
from matplotlib.colors import LogNorm
image_data1 = pyfits.getdata("python_club_gemini_CC2_7.fits")
plt.imshow(image_data1, cmap="gray", aspect='auto', norm=LogNorm(vmin=30.08, vmax=1000.1, clip=True))
#plt.imshow(image_data, cmap="gray", norm=LogNorm(vmin=0.1, vmax=0))
plt.colorbar()
image_data2 = pyfits.getdata("python_club_gemini_Amp_7.fits")
plt.imshow(image_data2, cmap="gray", aspect='auto', norm=LogNorm(vmin=30.08, vmax=1000.1, clip=True))
plt.colorbar()
#Updating image...
imagen_in = "python_club_gemini.fits"
imagen_end = "python_club_gemini.fits"
imagen_a_corregir = imagen_in
FILE_FIN = imagen_end + "_CC2.fits"  # final image
FILE_TEMP = imagen_end + "_tmp.fits"  # temporary image where the changes are made
imagen_header_original = imagen_in  # original image with all the headers
import shutil
# making a copy of the original file to a temporary one
shutil.copyfile(imagen_header_original, FILE_TEMP)
imagen_header2 = FILE_TEMP
for i in range(1, 13):
    imagen_Corre = str(imagen_a_corregir[:-5] + "_CC2_" + str(i) + ".fits")  # output of correction "CC2", NAME_CC2.fits
    imagen_no_header = pyfits.getdata(imagen_Corre, header=False)
    header = pyfits.getheader(FILE_TEMP)
    header2 = pyfits.getheader(FILE_TEMP, i)
    pyfits.update(imagen_header2, imagen_no_header, ext=i)  # copy the image itself into the corresponding extension
    pyfits.update(imagen_header2, imagen_no_header, header2, i)  # copy the original header into the corresponding extension
import shutil
shutil.copyfile(FILE_TEMP, FILE_FIN)
## Final sanity check
#pyfits.info(FILE_FIN)
#print("Deleting files")
#print("Deleting _CC_ files and _Amp_ files")
#for m in range(1, 13):
#    print(" rm " + imagen_a_corregir[:-5] + "_CC2_" + str(m) + ".fits")
#    os.remove(imagen_a_corregir[:-5] + "_CC2_" + str(m) + ".fits")
#    print(" rm " + imagen_a_corregir[:-5] + "_Amp_" + str(m) + ".fits")
#    os.remove(imagen_a_corregir[:-5] + "_Amp_" + str(m) + ".fits")
print "Cosmic cleaning done"
print "\n Marcelo D. Mora, 07.09.2016 mmora@astro.puc.cl v.2.0 \n"
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
<ASSISTANT_TASK:>
Python Code::
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
model = LinearRegression()
model.fit(X_train, y_train)
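# Illustrative follow-up (assumes the X_test/y_test split from above): report
# the model's R^2 score on the held-out data.
print(model.score(X_test, y_test))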
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'awi', 'awi-cm-1-0-hr', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Words in Space
Step2: Vectorize the Documents
Step3: About t-SNE
Step4: Euclidean Distance
Step5: As you've probably heard, Euclidean distance is not an ideal choice for sparse data. That's because when we vectorize a corpus, we end up with huge, sparse vectors, so it's sort of a crapshoot whether the most informative features (e.g. words) will vary in a way that Euclidean distance captures. We can see in the above that Euclidean distance hasn't done a terrible job of spatially differentiating the different categories of documents; the "sports" and "cooking" clusters look pretty clear. However, there's a lot of overlap and muddling of the other categories.
Step6: Minkowski Distance
Step7: Bray Curtis Dissimilarity
Step8: Note the weird beauty mark in the plot above. We'll see this in several of the plots below.
Step9: Another beauty mark!
Step10: Chebyshev does seem to produce a beauty mark also, but it's less distinct from the rest of the points.
Step11: Danger!
Step12: Notice the crescent shape!
Step13: Again we see the crescent shape we saw in Jaccard.
Step14: Another crescent!
Step15: Another crescent!
Step16: Sokal Michener
Step17: Rogers Tanimoto
Step18: Hamming
Step19: Hehe.
Step20: Mahalanobis Distance
Step21: Standardized Euclidean Distance
Step22: Yule
<ASSISTANT_TASK:>
Python Code:
import os
from sklearn.utils import Bunch
from yellowbrick.download import download_all
## The path to the test data sets
FIXTURES = os.path.join(os.getcwd(), "data")
## Dataset loading mechanisms
datasets = {
"hobbies": os.path.join(FIXTURES, "hobbies")
}
def load_data(name, download=True):
    """
    Loads and wrangles the passed in text corpus by name.
    If download is specified, this method will download any missing files.
    """
    # Get the path from the datasets
    path = datasets[name]

    # Check if the data exists, otherwise download or raise
    if not os.path.exists(path):
        if download:
            download_all()
        else:
            raise ValueError((
                "'{}' dataset has not been downloaded, "
                "use the download.py module to fetch datasets"
            ).format(name))

    # Read the directories in the directory as the categories.
    categories = [
        cat for cat in os.listdir(path)
        if os.path.isdir(os.path.join(path, cat))
    ]

    files = []   # holds the file names relative to the root
    data = []    # holds the text read from the file
    target = []  # holds the string of the category

    # Load the data from the files in the corpus
    for cat in categories:
        for name in os.listdir(os.path.join(path, cat)):
            files.append(os.path.join(path, cat, name))
            target.append(cat)

            with open(os.path.join(path, cat, name), 'r') as f:
                data.append(f.read())

    # Return the data bunch for use similar to the newsgroups example
    return Bunch(
        categories=categories,
        files=files,
        data=data,
        target=target,
    )
corpus = load_data('hobbies')
import matplotlib.pyplot as plt
from yellowbrick.text import TSNEVisualizer
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer()
docs = vectorizer.fit_transform(corpus.data)
labels = corpus.target
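# Quick sanity check (an added illustration, not part of the original lab):
# the TF-IDF matrix is huge and sparse, which is the root of the trouble
# Euclidean distance has with this corpus (see Step 5 above).
print(docs.shape)
print("density: {:.5f}".format(docs.nnz / float(docs.shape[0] * docs.shape[1])))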
tsne = TSNEVisualizer()
tsne.fit(docs, labels)
tsne.poof()
tsne = TSNEVisualizer(metric="euclidean")
tsne.fit(docs, labels)
tsne.poof()
tsne = TSNEVisualizer(metric="cityblock")
tsne.fit(docs, labels)
tsne.poof()
tsne = TSNEVisualizer(metric="minkowski")
tsne.fit(docs, labels)
tsne.poof()
tsne = TSNEVisualizer(metric="braycurtis")
tsne.fit(docs, labels)
tsne.poof()
tsne = TSNEVisualizer(metric="canberra")
tsne.fit(docs, labels)
tsne.poof()
tsne = TSNEVisualizer(metric="chebyshev")
tsne.fit(docs, labels)
tsne.poof()
tsne = TSNEVisualizer(metric="cosine")
tsne.fit(docs, labels)
tsne.poof()
tsne = TSNEVisualizer(metric="jaccard")
tsne.fit(docs, labels)
tsne.poof()
tsne = TSNEVisualizer(metric="dice")
tsne.fit(docs, labels)
tsne.poof()
tsne = TSNEVisualizer(metric="kulsinski")
tsne.fit(docs, labels)
tsne.poof()
# russellrao
tsne = TSNEVisualizer(metric="russellrao")
tsne.fit(docs, labels)
tsne.poof()
tsne = TSNEVisualizer(metric="sokalsneath")
tsne.fit(docs, labels)
tsne.poof()
tsne = TSNEVisualizer(metric="sokalmichener")
tsne.fit(docs, labels)
tsne.poof()
tsne = TSNEVisualizer(metric="rogerstanimoto")
tsne.fit(docs, labels)
tsne.poof()
tsne = TSNEVisualizer(metric="hamming")
tsne.fit(docs, labels)
tsne.poof()
# tsne = TSNEVisualizer(metric="correlation")
# tsne.fit(docs, labels)
# tsne.poof()
tsne = TSNEVisualizer(metric="mahalanobis", method='exact')
tsne.fit(docs, labels)
tsne.poof()
# tsne = TSNEVisualizer(metric="seuclidean")
# tsne.fit(docs, labels)
# tsne.poof()
# tsne = TSNEVisualizer(metric="yule")
# tsne.fit(docs, labels)
# tsne.poof()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: The raw data, expressed as percentages. We will divide by 100
Step3: The regression model is a two-way additive model with site and variety effects
Step4: Fit the quasi-binomial regression with the standard variance
Step5: The plot below shows that the default variance function does not capture the mean/variance structure very well
Step6: An alternative variance function is mu^2 * (1 - mu)^2.
Step7: Fit the quasi-binomial regression with the alternative variance
Step8: With the alternative variance function, the mean/variance relationship is captured much better
|
<ASSISTANT_TASK:>
Python Code:
import statsmodels.api as sm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from io import StringIO
raw = StringIO("""0.05,0.00,1.25,2.50,5.50,1.00,5.00,5.00,17.50
0.00,0.05,1.25,0.50,1.00,5.00,0.10,10.00,25.00
0.00,0.05,2.50,0.01,6.00,5.00,5.00,5.00,42.50
0.10,0.30,16.60,3.00,1.10,5.00,5.00,5.00,50.00
0.25,0.75,2.50,2.50,2.50,5.00,50.00,25.00,37.50
0.05,0.30,2.50,0.01,8.00,5.00,10.00,75.00,95.00
0.50,3.00,0.00,25.00,16.50,10.00,50.00,50.00,62.50
1.30,7.50,20.00,55.00,29.50,5.00,25.00,75.00,95.00
1.50,1.00,37.50,5.00,20.00,50.00,50.00,75.00,95.00
1.50,12.70,26.25,40.00,43.50,75.00,75.00,75.00,95.00""")
df = pd.read_csv(raw, header=None)
df = df.melt()
df["site"] = 1 + np.floor(df.index / 10).astype(np.int)
df["variety"] = 1 + (df.index % 10)
df = df.rename(columns={"value": "blotch"})
df = df.drop("variable", axis=1)
df["blotch"] /= 100
model1 = sm.GLM.from_formula("blotch ~ 0 + C(variety) + C(site)",
family=sm.families.Binomial(), data=df)
result1 = model1.fit(scale="X2")
print(result1.summary())
plt.clf()
plt.grid(True)
plt.plot(result1.predict(linear=True), result1.resid_pearson, 'o')
plt.xlabel("Linear predictor")
plt.ylabel("Residual")
class vf(sm.families.varfuncs.VarianceFunction):
def __call__(self, mu):
return mu**2 * (1 - mu)**2
def deriv(self, mu):
return 2*mu - 6*mu**2 + 4*mu**3
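# Added illustration (not in the original example): a quick finite-difference
# check that vf.deriv matches the derivative of mu**2 * (1 - mu)**2.
_v = vf()
_mu = np.linspace(0.05, 0.95, 5)
_eps = 1e-6
_fd = (_v(_mu + _eps) - _v(_mu - _eps)) / (2 * _eps)
print(np.abs(_fd - _v.deriv(_mu)).max())  # should be ~0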
bin = sm.families.Binomial()
bin.variance = vf()
model2 = sm.GLM.from_formula("blotch ~ 0 + C(variety) + C(site)", family=bin, data=df)
result2 = model2.fit(scale="X2")
print(result2.summary())
plt.clf()
plt.grid(True)
plt.plot(result2.predict(linear=True), result2.resid_pearson, 'o')
plt.xlabel("Linear predictor")
plt.ylabel("Residual")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install TensorFlow Transform
Step2: Restart the kernel to use updated packages. (On the Notebook menu, select Kernel > Restart Kernel > Restart).
Step3: Imports
Step4: Data
Step6: Transform
Step7: Syntax
Step8: Is this the right answer?
Step9: The transform_fn/ directory contains a tf.saved_model with the results of the tensorflow-transform analysis built into the graph as constants.
Step10: A better approach is to load it using tft.TFTransformOutput. The TFTransformOutput.transform_features_layer method returns a tft.TransformFeaturesLayer object that can be used to apply the transformation
Step11: This tft.TransformFeaturesLayer expects a dictionary of batched features. So create a Dict[str, tf.Tensor] from the List[Dict[str, Any]] in raw_data
Step12: You can use the tft.TransformFeaturesLayer on its own
Step13: Export
Step14: Imagine you trained the model.
Step15: An example export wrapper
Step16: This combined model works on the raw data, and produces exactly the same results as calling the trained model directly
Step17: This export_model includes the tft.TransformFeaturesLayer and is entirely self-contained. You can save it and restore it in another environment and still get exactly the same result
|
<ASSISTANT_TASK:>
Python Code:
!pip install --upgrade pip
!pip install -q -U tensorflow_transform
# This cell is only necessary because packages were installed while python was running.
import pkg_resources
import importlib
importlib.reload(pkg_resources)
import pathlib
import pprint
import tempfile
import tensorflow as tf
import tensorflow_transform as tft
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import schema_utils
raw_data = [
{'x': 1, 'y': 1, 's': 'hello'},
{'x': 2, 'y': 2, 's': 'world'},
{'x': 3, 'y': 3, 's': 'hello'}
]
raw_data_metadata = dataset_metadata.DatasetMetadata(
schema_utils.schema_from_feature_spec({
'y': tf.io.FixedLenFeature([], tf.float32),
'x': tf.io.FixedLenFeature([], tf.float32),
's': tf.io.FixedLenFeature([], tf.string),
}))
def preprocessing_fn(inputs):
"""Preprocess input columns into transformed columns."""
x = inputs['x']
y = inputs['y']
s = inputs['s']
x_centered = x - tft.mean(x)
y_normalized = tft.scale_to_0_1(y)
s_integerized = tft.compute_and_apply_vocabulary(s)
x_centered_times_y_normalized = (x_centered * y_normalized)
return {
'x_centered': x_centered,
'y_normalized': y_normalized,
's_integerized': s_integerized,
'x_centered_times_y_normalized': x_centered_times_y_normalized,
}
def main(output_dir):
# Ignore the warnings
with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
transformed_dataset, transform_fn = ( # pylint: disable=unused-variable
(raw_data, raw_data_metadata) | tft_beam.AnalyzeAndTransformDataset(
preprocessing_fn))
transformed_data, transformed_metadata = transformed_dataset # pylint: disable=unused-variable
# Save the transform_fn to the output_dir
_ = (transform_fn | tft_beam.WriteTransformFn(output_dir)) # TODO 1 -- one possible solution: write the transform graph to disk
return transformed_data, transformed_metadata
output_dir = pathlib.Path(tempfile.mkdtemp())
transformed_data, transformed_metadata = main(str(output_dir))
print('\nRaw data:\n{}\n'.format(pprint.pformat(raw_data)))
print('Transformed data:\n{}'.format(pprint.pformat(transformed_data)))
!ls -l {output_dir}
# Load a SavedModel from export_dir
loaded = tf.saved_model.load(str(output_dir/'transform_fn')) # TODO 2 -- one possible solution; transform_fn/ matches the listing above
loaded.signatures['serving_default']
tf_transform_output = tft.TFTransformOutput(output_dir)
tft_layer = tf_transform_output.transform_features_layer()
tft_layer
raw_data_batch = {
's': tf.constant([ex['s'] for ex in raw_data]),
'x': tf.constant([ex['x'] for ex in raw_data], dtype=tf.float32),
'y': tf.constant([ex['y'] for ex in raw_data], dtype=tf.float32),
}
transformed_batch = tft_layer(raw_data_batch)
{key: value.numpy() for key, value in transformed_batch.items()}
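# For the tiny dataset above, the expected output (derived by hand from
# preprocessing_fn; added here as an illustration) is roughly:
#   x_centered ~ [-1, 0, 1], y_normalized ~ [0, 0.5, 1],
#   s_integerized ~ [0, 1, 0], x_centered_times_y_normalized ~ [0, 0, 1]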
class StackDict(tf.keras.layers.Layer):
def call(self, inputs):
values = [
tf.cast(v, tf.float32)
for k,v in sorted(inputs.items(), key=lambda kv: kv[0])]
return tf.stack(values, axis=1)
class TrainedModel(tf.keras.Model):
def __init__(self):
super().__init__(self)
self.concat = StackDict()
self.body = tf.keras.Sequential([
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10),
])
def call(self, inputs, training=None):
x = self.concat(inputs)
return self.body(x, training)
trained_model = TrainedModel()
trained_model_output = trained_model(transformed_batch)
trained_model_output.shape
class ExportModel(tf.Module):
def __init__(self, trained_model, input_transform):
self.trained_model = trained_model
self.input_transform = input_transform
@tf.function
def __call__(self, inputs, training=None):
x = self.input_transform(inputs)
return self.trained_model(x)
# Export the model
export_model = ExportModel(trained_model=trained_model, input_transform=tft_layer) # TODO 3 -- one possible solution
export_model_output = export_model(raw_data_batch)
export_model_output.shape
tf.reduce_max(abs(export_model_output - trained_model_output)).numpy()
import tempfile
model_dir = tempfile.mkdtemp(suffix='tft')
tf.saved_model.save(export_model, model_dir)
reloaded = tf.saved_model.load(model_dir)
reloaded_model_output = reloaded(raw_data_batch)
reloaded_model_output.shape
tf.reduce_max(abs(export_model_output - reloaded_model_output)).numpy()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Building an ANN from Scratch
Step2: We're going to build an ANN class, called NeuralNetwork; it will contain two functions and an initializer.
Step3: 2.2 Building the Query Function
Step4: 2.3 Building the Train Function
Step5: 2.4 Assembling the NeuralNetwork Class
Step6: Training the Network on MNIST
Step7: We've initialized the ANN, so now we need to actually execute the training of it. We'll train over N epochs, which are essentially just the number of times we go over the data to see if we can continue to refine the weights.
Step8: Scoring the Network
Step9: Now we've built our first ANN. This is a pretty small one compared to some that exist in the depths of the interwebs, but ultimately it's a start.
Step10: In TensorFlow, a placeholder is a promise that we'll provide a value later; it's akin to declaring a variable, but not initializing it.
Step11: Variables are what you're used to from programming; in TensorFlow, these are considered "trainable parameters" that get added to your Graph. They're akin to declaring and initializing the variable. They have a dtype and initial value.
Step12: Softmax is one of many different activation functions. Softmax is defined as $\sigma(z)_j = e^{z_j} / \sum_k e^{z_k}$.
Step13: As always, we need a loss function, like SSE in the ANN. Cross-entropy arises from thinking about information-compressing codes in information theory, but it winds up being an important idea in lots of areas, from gambling to machine learning.
Step14: This is how we actually interact with the network we've built.
Step15: We've rebuilt an equivalent ANN in TensorFlow. This network's accuracy is about 91%, which is pretty bad for MNIST, but for a raw neural network, it's still pretty awesome.
|
<ASSISTANT_TASK:>
Python Code:
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("data/", one_hot=True)
import numpy as np
from scipy.special import expit
def __init__():
pass
def query():
pass
def train():
pass
class NeuralNetwork():
pass
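# One possible completion of the skeleton above -- an illustrative sketch of
# the classic 3-layer network the steps describe. The normally-distributed
# weight initialisation and expit-as-sigmoid are assumptions, and the
# placeholder sizes below (all 1) would need realistic values for MNIST
# (e.g. 784 inputs, ~100 hidden nodes, 10 outputs, learning rate ~0.1).
class NeuralNetwork():
    def __init__(self, n_inodes, n_hnodes, n_onodes, learn_rt):
        self.inodes = n_inodes
        self.hnodes = n_hnodes
        self.onodes = n_onodes
        self.lr = learn_rt
        # weight matrices: input -> hidden and hidden -> output
        self.wih = np.random.normal(0.0, self.hnodes ** -0.5, (self.hnodes, self.inodes))
        self.who = np.random.normal(0.0, self.onodes ** -0.5, (self.onodes, self.hnodes))
    def query(self, inputs_list):
        inputs = np.array(inputs_list, ndmin=2).T
        hidden_outputs = expit(np.dot(self.wih, inputs))
        return expit(np.dot(self.who, hidden_outputs))
    def train(self, inputs_list, targets_list):
        inputs = np.array(inputs_list, ndmin=2).T
        targets = np.array(targets_list, ndmin=2).T
        hidden_outputs = expit(np.dot(self.wih, inputs))
        final_outputs = expit(np.dot(self.who, hidden_outputs))
        output_errors = targets - final_outputs
        hidden_errors = np.dot(self.who.T, output_errors)
        # gradient-descent updates through the sigmoid derivative
        self.who += self.lr * np.dot(output_errors * final_outputs * (1.0 - final_outputs), hidden_outputs.T)
        self.wih += self.lr * np.dot(hidden_errors * hidden_outputs * (1.0 - hidden_outputs), inputs.T)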
n_inodes = 1
n_hnodes = 1
n_onodes = 1
learn_rt = 1
nn = NeuralNetwork(n_inodes, n_hnodes, n_onodes, learn_rt)
epochs = 5
for e in range(epochs):
for record, label in zip(mnist.train.images, mnist.train.labels):
record[record == float(0)] = 0.01
label[label == float(0)] = 0.01
nn.train(record, label)
score = []
for record, label in zip(mnist.test.images, mnist.test.labels):
correct_label = np.argmax(label)
inputs = record * 0.99 + 0.01
outputs = nn.query(inputs)
label = np.argmax(outputs)
score.append(1 if label == correct_label else 0)
print("Performance = {0:.3f}%".format(np.array(score).mean() * 100))
import tensorflow as tf
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
for _ in range(10000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
print("Performance = {0:.3f}%".format(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}) * 100))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load in house sales data
Step2: Split data into training and testing.
Step3: Learning a multiple regression model
Step4: Now that we have fitted the model we can extract the regression weights (coefficients) as an SFrame as follows
Step5: Making Predictions
Step6: Compute RSS
Step7: Test your function by computing the RSS on TEST data for the example model
Step8: Create some new features
Step9: Next create the following 4 new features as columns in both TEST and TRAIN data
Step10: Squaring bedrooms will increase the separation between not many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this feature will mostly affect houses with many bedrooms.
Step11: Now that you have the features, learn the weights for the three different models for predicting target = 'price' using graphlab.linear_regression.create() and look at the value of the weights/coefficients
Step12: Quiz Question
Step13: Quiz Question
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
sales = graphlab.SFrame('kc_house_data.gl/')
train_data,test_data = sales.random_split(.8,seed=0)
example_features = ['sqft_living', 'bedrooms', 'bathrooms']
example_model = graphlab.linear_regression.create(train_data, target = 'price', features = example_features,
validation_set = None)
example_weight_summary = example_model.get("coefficients")
print example_weight_summary
example_predictions = example_model.predict(train_data)
print example_predictions[0] # should be 271789.505878
def get_residual_sum_of_squares(model, data, outcome):
    # First get the predictions
    predictions = model.predict(data)
    # Then compute the residuals/errors
    residuals = outcome - predictions
    # Then square and add them up
    RSS = (residuals * residuals).sum()
    return(RSS)
rss_example_train = get_residual_sum_of_squares(example_model, test_data, test_data['price'])
print rss_example_train # should be 2.7376153833e+14
from math import log
train_data['bedrooms_squared'] = train_data['bedrooms'].apply(lambda x: x**2)
test_data['bedrooms_squared'] = test_data['bedrooms'].apply(lambda x: x**2)
# create the remaining 3 features in both TEST and TRAIN data
train_data['bed_bath_rooms'] = train_data['bedrooms'] * train_data['bathrooms']
test_data['bed_bath_rooms'] = test_data['bedrooms'] * test_data['bathrooms']
train_data['log_sqft_living'] = train_data['sqft_living'].apply(lambda x: log(x))
test_data['log_sqft_living'] = test_data['sqft_living'].apply(lambda x: log(x))
train_data['lat_plus_long'] = train_data['lat'] + train_data['long']
test_data['lat_plus_long'] = test_data['lat'] + test_data['long']
model_1_features = ['sqft_living', 'bedrooms', 'bathrooms', 'lat', 'long']
model_2_features = model_1_features + ['bed_bath_rooms']
model_3_features = model_2_features + ['bedrooms_squared', 'log_sqft_living', 'lat_plus_long']
# Learn the three models: (don't forget to set validation_set = None)
# Examine/extract each model's coefficients:
# Compute the RSS on TRAINING data for each of the three models and record the values:
# Compute the RSS on TESTING data for each of the three models and record the values:
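# One possible completion of the exercise above -- an illustrative sketch that
# reuses the graphlab calls already shown for example_model:
model_1 = graphlab.linear_regression.create(train_data, target='price', features=model_1_features, validation_set=None)
model_2 = graphlab.linear_regression.create(train_data, target='price', features=model_2_features, validation_set=None)
model_3 = graphlab.linear_regression.create(train_data, target='price', features=model_3_features, validation_set=None)
print model_1.get("coefficients")
print model_2.get("coefficients")
print model_3.get("coefficients")
print get_residual_sum_of_squares(model_1, train_data, train_data['price'])
print get_residual_sum_of_squares(model_2, train_data, train_data['price'])
print get_residual_sum_of_squares(model_3, train_data, train_data['price'])
print get_residual_sum_of_squares(model_1, test_data, test_data['price'])
print get_residual_sum_of_squares(model_2, test_data, test_data['price'])
print get_residual_sum_of_squares(model_3, test_data, test_data['price'])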
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a figure
Step2: Simple Public API
Step3: The _BokehStructureGraph class
Step4: Properties of the Structure Graph
Step5: Dataframe of attributes with values, types, and docstrings
Step6: Graphical representation
|
<ASSISTANT_TASK:>
Python Code:
import bokeh
from bokeh.models.util import generate_structure_plot
from bokeh.plotting import figure
from bokeh.io import output_notebook, show
output_notebook()
import numpy as np
X = np.linspace(-1,1,100)
Y = X + np.random.normal(size=X.shape)
f=figure(width=400,height=400)
_=f.line(x=X,y=Y,color='blue',line_width=3,alpha=.5)
_=f.line(x=X,y=X,color='red',line_width=3)
show(f)
show(generate_structure_plot(f))
from bokeh.models.util import structure as st
BSG=st._BokehStructureGraph(f)
for x in BSG.graph.nodes(data=True):
print(x)
BSG.property_df
import pandas as pd  # pd is used below but pandas was never imported in this notebook
BSG.property_df['doc']=BSG.property_df['doc'].str.replace('\n','')
pd.set_option('display.max_colwidth', None)
BSG.property_df
show(BSG.model)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Many observation points
Step2: Compute the flow across a ring with radius r
Step3: Inflow from the river
Step4: Compute the total inflow for a single time
Step5: Show the total inflow for many times but a constant well extraction
Step6: Everything combined, many times, with varying well extraction
Step7: The first time the injection is Q.
Step8: Choose times at which we want to see the total inflow
Step9: q1
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as sp
W = sp.exp1
kD = 600 # m2/d
S = 0.2 # [-]
x0 = 250 # m # distance from river
Q = 1200 # m3/d, extraction of the real well
r0 = 0.25 # well radius
t = 1.0 # d
a = 125 # m distance between well and river shore
# locations of well, mirror well and observation points
x1, y1 = -a, 0. # location of extraction well
x2, y2 = +a, 0. # location of mirror well
x0, y0 = 50., 100. # location of observation point
r1 = np.sqrt((x1 - x0)**2 + (y1 - y0)**2)
r2 = np.sqrt((x2 - x0)**2 + (y2 - y0)**2)
u1 = r1 ** 2 * S / (4 * kD * t)
u2 = r2 ** 2 * S / (4 * kD * t)
# drawdown
s = Q /(4 * np.pi * kD) * (W(u1) - W(u2)) # minus because mirror well has opposite Q
print("The ddn s at x0={:.1f}, y0={:.1f} at t={:.2f} d with Q={:.0f} m3/d equals {:.2f} m"\
.format(x0, y0, t, Q, s))
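# Sanity check (added for illustration): on the river line x=0 the well and
# its mirror are equidistant, so the drawdown there should vanish by symmetry.
r1_riv = np.sqrt(x1 ** 2 + y0 ** 2)
r2_riv = np.sqrt(x2 ** 2 + y0 ** 2)
s_riv = Q / (4 * np.pi * kD) * (W(r1_riv ** 2 * S / (4 * kD * t)) - W(r2_riv ** 2 * S / (4 * kD * t)))
print("ddn on the river line: {:.2e} m (expected ~0)".format(s_riv))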
# Drawdown at arbitrary points x, y
x = np.linspace(-2 * a, 2 * a, 201)
y = np.zeros_like(x)
rw = 0.25 # well radius (radius of borehole)
r1 = np.sqrt((x1 - x)**2 + (y1 - y)**2)
r2 = np.sqrt((x2 - x)**2 + (y2 - y)**2)
# Use boolean (logical) indexing to cap the distances at the well radius.
# A boolean mask has the same shape as the array, with True where the
# condition holds and False elsewhere; only the True entries are assigned.
r1[r1<rw] = rw
r2[r2<rw] = rw
u1 = r1**2 * S / (4 * kD * t)
u2 = r2**2 * S / (4 * kD * t)
s = Q / (4 * np.pi *kD) * (W(u1) - W(u2))
plt.title('Drawdown along a line through the well and its mirror')
plt.xlabel('x [m], y=0')
plt.ylabel('s [m]')
plt.grid()
plt.plot(x, s)
plt.ylim(2, -2) # invert the direction of the y-axis
plt.show()
Q0 = 1200 # m3/d Well
x0, y0 = 0., 0. # the location of the well
# The random observation points
a = 50 # m a length to scale randomly chosen points
x = a * (np.random.randn(25) - 0.5) # choose 25 random values
y = a * (np.random.randn(25) - 0.5) # same
# Distance from each point to the well
r = np.sqrt((x-x0)**2 + (y-y0)**2)
# u
u = r**2 * S /(4 * kD * t)
# the specific discharge
q = Q0 / (2 * np.pi * r) * np.exp(-u) # m2/d
alpha = np.arctan2(y - y0, x- x0) # angle between vector and horizontal
qx = q * np.cos(alpha) # x component of specific discharge
qy = q * np.sin(alpha) # y component of specific discharge
scale = 2.0 # scale factor to plot specific discharge vectors
# plot the location of the well as black ('k') circle ('o') of size 8
plt.plot(x0, y0, 'ko', markersize=8)
# in a loop, plot one vector after another
for xx, yy, qxx, qyy in zip(x, y, qx, qy):
    plt.plot(xx, yy, 'o', markersize=4) # plot marker at obs. point
    plt.plot([xx, xx - scale * qxx], [yy, yy - scale * qyy]) # plot vector
# embellishment of the plot
plt.title('Specific discharge vectors due to well at x0={:.1f}, y0={:.1f}'\
.format(x0, y0))
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.grid()
# show it all
plt.show()
Q = 1200 # m3/d
b = 125 # m, distance of well to river shore
x1, y1 = -b, 0 # location of well
x2, y2 = +b, 0 # location of mirror well
y = np.linspace(-1000, 1000, 81)
x = np.zeros_like(y)
r1 = np.sqrt((x - x1) ** 2 + (y - y1) ** 2)
r2 = np.sqrt((x - x2) ** 2 + (y - y2) ** 2)
u1 = r1 ** 2 * S / (4 * kD * t)
u2 = r2 ** 2 * S / (4 * kD * t)
q1 = +Q / (2 * np.pi * r1) * np.exp(-u1)
q2 = -Q / (2 * np.pi * r2) * np.exp(-u2)
alpha1 = np.arctan2(y, x - x1)
alpha2 = np.arctan2(y, x - x2)
qx = q1 * np.cos(alpha1) + q2 * np.cos(alpha2)
qy = q1 * np.sin(alpha1) + q2 * np.sin(alpha2)
qin = qx
plt.title('Inflow along the river')
plt.xlabel('y [m]')
plt.ylabel('inflow m2/d')
plt.grid()
plt.plot(y, qin)
plt.show()
dy = y[1:] - y[:-1]
qm = 0.5 * (qin[:-1] + qin[1:])
Qin = np.sum(qm * dy)
print('Total inflow with Q={:.0f} at t={:.1f} d equals {:.1f} m3/d'\
.format(Q, t, Qin))
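# Cross-check (added): numpy's trapezoidal rule should reproduce the midpoint
# sum above almost exactly.
print('np.trapz gives {:.1f} m3/d'.format(np.trapz(qin, y)))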
Q = 1200 # m3/d
b = 125 # m, distance of well to river shore
x1, y1 = -b, 0 # location of well
x2, y2 = +b, 0 # location of mirror well
times = np.linspace(0, 50, 501) +0.0000001 # times
y = np.linspace(-1000, 1000, 801)
x = np.zeros_like(y)
r1 = np.sqrt((x - x1) ** 2 + (y - y1) ** 2)
r2 = np.sqrt((x - x2) ** 2 + (y - y2) ** 2)
alpha1 = np.arctan2(y, x - x1)
alpha2 = np.arctan2(y, x - x2)
Qin = np.zeros_like(times)
for i, t in enumerate(times):
u1 = r1 ** 2 * S / (4 * kD * t)
u2 = r2 ** 2 * S / (4 * kD * t)
q1 = +Q / (2 * np.pi * r1) * np.exp(-u1)
q2 = -Q / (2 * np.pi * r2) * np.exp(-u2)
qx = q1 * np.cos(alpha1) + q2 * np.cos(alpha2)
qy = q1 * np.sin(alpha1) + q2 * np.sin(alpha2)
qin = qx
Qin[i] = np.sum((y[:-1] - y[1:]) * 0.5 * (qin[:-1] +qin[1:]))
plt.title('Total inflow from river')
plt.xlabel('t [d]')
plt.ylabel('total inflow m3/d')
plt.ylim(0, Q)
plt.grid()
plt.plot(times, -Qin)
plt.show()
Q0 = 2400 # m3/d
kD = 500 # m2/d
S = 0.2 # [-]
# all variable that are constant in time:
b = 1500 # m, distance of well to river shore
x1, y1 = -b, 0 # location of well
x2, y2 = +b, 0 # location of mirror well
# all parameters that are constant in time
y = np.linspace(-1000, 1000, 81)
x = np.zeros_like(y)
dy = y[1:] - y[:-1]
r1 = np.sqrt((x - x1) ** 2 + (y - y1) ** 2)
r2 = np.sqrt((x - x2) ** 2 + (y - y2) ** 2)
alpha1 = np.arctan2(y, x - x1)
alpha2 = np.arctan2(y, x - x2)
# Set the times when pumping switches
# start, end, step
month = np.arange(9, 120, 6) # month numbers at which flow switches
#counting from Jan 1 in first year
Tsw = 30 * month # t at which flow switches in days
# set the pumping flow at the switch points
Qsw = Q0 * 2 * (-1) ** np.arange(len(Tsw))
Qsw[0] = Q0
# Show them
print('{:>6} {:>6}'.format('Tsw[d]', 'Qsw'))
for tsw, Q in zip(Tsw, Qsw):
print('{:6.0f} {:6.0f}'.format(tsw, Q))
# The small values prevents computing flows at the time the well starts
# t is now just a negilible time later.
# start end step
times = np.arange(0, 30 * 120, 10) + 0.00001 # 10 years in days in 10 day steps
Q0 = 2400 # m3/d
b = 1500 # m, distance of well to river shore
x1, y1 = -b, 0 # location of well
x2, y2 = +b, 0 # location of mirror well
times = np.linspace(0, 30 * 120, 501) +0.0000001 # times
# points along the river
y = np.linspace(-1000, 1000, 801)
x = np.zeros_like(y)
r1 = np.sqrt((x - x1) ** 2 + (y - y1) ** 2)
r2 = np.sqrt((x - x2) ** 2 + (y - y2) ** 2)
alpha1 = np.arctan2(y, x - x1)
alpha2 = np.arctan2(y, x - x2)
Qps = np.zeros_like(times) # pumping station
Qriv = np.zeros_like(times) # inflow from river
for i, t in enumerate(times):
for tsw, qsw in zip(Tsw, Qsw):
if t > tsw:
u1 = r1 ** 2 * S / (4 * kD * (t - tsw))
u2 = r2 ** 2 * S / (4 * kD * (t - tsw))
q1 = +qsw / (2 * np.pi * r1) * np.exp(-u1)
q2 = +qsw / (2 * np.pi * r2) * np.exp(-u2)
qx = q1 * np.cos(alpha1) - q2 * np.cos(alpha2)
qy = q1 * np.sin(alpha1) - q2 * np.sin(alpha2)
qin = qx
Qps[i] = Qps[i] + qsw
Qriv[i]= Qriv[i] + np.sum((y[:-1] - y[1:]) * 0.5 * (qin[:-1] +qin[1:]))
plt.title('Total inflow from river')
plt.xlabel('t [d]')
plt.ylabel('total inflow m3/d')
plt.ylim(-2*Q0, 2*Q0)
plt.grid()
plt.plot(times, Qps, 'r')
plt.plot(times, Qriv, 'b')
plt.show()
Qsw
q1
times[-1]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Definition
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf  # tf is used in this first cell, so import it up front
import numpy as np  # np is needed by the SPP layer further down but was never imported
print(tf.nn.softmax_cross_entropy_with_logits.__doc__)
import tensorflow as tf
from keras.layers.advanced_activations import LeakyReLU, PReLU
def LeakyRelu(x, alpha):
return tf.maximum(alpha*x, x)
with tf.Session() as sess:
inp = tf.Variable(initial_value=tf.random_uniform(shape=[5], minval=-5, maxval=5, dtype=tf.float32))
alpha = 0.5
res = LeakyRelu(inp, alpha)
sess.run(tf.global_variables_initializer())
before, after = sess.run([inp, res])
print('before', before)
print('after', after)
def PRelu(x):
alpha = tf.Variable(initial_value=tf.random_normal(shape=x.shape))
return tf.where(x < 0, alpha * x, tf.nn.relu(x))
with tf.Session() as sess:
inp = tf.Variable(initial_value=tf.random_uniform(shape=[5], minval=-5, maxval=5, dtype=tf.float32))
alpha = 0.5
res = PRelu(inp)
sess.run(tf.global_variables_initializer())
before, after = sess.run([inp, res])
print('before', before)
print('after', after)
def spp_layer(input_, levels=[2, 1], name = 'SPP_layer'):
'''Multiple Level SPP layer.
Works for levels=[1, 2, 3, 6].'''
shape = input_.get_shape().as_list()
with tf.variable_scope(name):
pool_outputs = []
for l in levels:
pool = tf.nn.max_pool(input_, ksize=[1, np.ceil(shape[1] * 1. / l).astype(np.int32),
np.ceil(shape[2] * 1. / l).astype(np.int32), 1],
strides=[1, np.floor(shape[1] * 1. / l + 1).astype(np.int32),
np.floor(shape[2] * 1. / l + 1).astype(np.int32), 1],
padding='SAME')
pool_outputs.append(tf.reshape(pool, [shape[0], -1]))
spp_pool = tf.concat(pool_outputs, 1)  # (values, axis) -- the TF >= 1.0 argument order
return spp_pool
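# Illustrative usage of spp_layer (added; the shapes are assumptions): a batch
# of 8 feature maps of 32x32x3 pooled at levels [2, 1] should give
# (4 + 1) * 3 = 15 features per example.
feat = tf.placeholder(tf.float32, [8, 32, 32, 3])
spp = spp_layer(feat, levels=[2, 1])
print(spp.get_shape())  # expected: (8, 15)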
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Interact basics
Step2: Use the interact function to interact with the print_sum function.
Step3: Write a function named print_string that prints a string and additionally prints the length of that string if a boolean parameter is True.
Step4: Use the interact function to interact with the print_string function.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
def print_sum(a, b):
c = a + b
print (c)
interact(print_sum, a = (-10.0,10.0,1.0), b = (-8.0,8.0,2.0));
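# A small added illustration: `fixed` (imported above) pins an argument so no
# slider is drawn for it -- here only `a` gets a widget.
interact(print_sum, a=(-10.0, 10.0, 1.0), b=fixed(5.0));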
assert True # leave this for grading the print_sum exercise
def print_string(s, length=False):
print (s)
if length:
print (len(s))
interact(print_string, s = 'Hello World!', length=True);
assert True # leave this for grading the print_string exercise
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sections
Step2: <br>
Step3: To change the float type globally, execute
Step4: You can run a Python script on CPU via
Step5: Updating shared arrays.
Step6: We can use the givens variable to insert values into the graph before compiling it. Using this approach we can reduce the number of transfers from RAM (via CPUs) to GPUs to speed up learning with shared variables. If we use inputs, a dataset is transferred from the CPU to the GPU multiple times, for example, if we iterate over a dataset multiple times (epochs) during gradient descent. Via givens, we can keep the dataset on the GPU if it fits (e.g., a mini-batch).
Step7: <br>
Step8: Implementing the training function.
Step9: Plotting the sum of squared errors cost vs epochs.
Step10: Making predictions.
Step11: <br>
Step12: Now, imagine a multi-layer perceptron (MLP) with 3 hidden units + 1 bias unit in the hidden layer. The output layer consists of 3 output units.
Step13: <br>
Step14: <br>
Step16: <br>
Step17: Multi-layer Perceptron in Keras
Step18: One-hot encoding of the class variable
|
<ASSISTANT_TASK:>
Python Code:
%load_ext watermark
%watermark -a 'Sebastian Raschka' -u -d -v -p numpy,matplotlib,theano,keras
# to install watermark just uncomment the following line:
#%install_ext https://raw.githubusercontent.com/rasbt/watermark/master/watermark.py
import theano
from theano import tensor as T
# initialize
x1 = T.scalar()
w1 = T.scalar()
w0 = T.scalar()
z1 = w1 * x1 + w0
# compile
net_input = theano.function(inputs=[w1, x1, w0], outputs=z1)
# execute
net_input(2.0, 1.0, 0.5)
print(theano.config.floatX)
theano.config.floatX = 'float32'
print(theano.config.device)
import numpy as np
# initialize
x = T.fmatrix(name='x')
x_sum = T.sum(x, axis=0)
# compile
calc_sum = theano.function(inputs=[x], outputs=x_sum)
# execute (Python list)
ary = [[1, 2, 3], [1, 2, 3]]
print('Column sum:', calc_sum(ary))
# execute (NumPy array)
ary = np.array([[1, 2, 3], [1, 2, 3]], dtype=theano.config.floatX)
print('Column sum:', calc_sum(ary))
# initialize
x = T.fmatrix(name='x')
w = theano.shared(np.asarray([[0.0, 0.0, 0.0]],
dtype=theano.config.floatX))
z = x.dot(w.T)
update = [[w, w + 1.0]]
# compile
net_input = theano.function(inputs=[x],
updates=update,
outputs=z)
# execute
data = np.array([[1, 2, 3]], dtype=theano.config.floatX)
for i in range(5):
print('z%d:' % i, net_input(data))
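# Added illustration: the shared array keeps its state between calls, which we
# can confirm by reading the accumulated weights back with get_value().
print('w after 5 updates:', w.get_value())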
# initialize
data = np.array([[1, 2, 3]],
dtype=theano.config.floatX)
x = T.fmatrix(name='x')
w = theano.shared(np.asarray([[0.0, 0.0, 0.0]],
dtype=theano.config.floatX))
z = x.dot(w.T)
update = [[w, w + 1.0]]
# compile
net_input = theano.function(inputs=[],
updates=update,
givens={x: data},
outputs=z)
# execute
for i in range(5):
print('z:', net_input())
import numpy as np
X_train = np.asarray([[0.0], [1.0], [2.0], [3.0], [4.0],
[5.0], [6.0], [7.0], [8.0], [9.0]],
dtype=theano.config.floatX)
y_train = np.asarray([1.0, 1.3, 3.1, 2.0, 5.0,
6.3, 6.6, 7.4, 8.0, 9.0],
dtype=theano.config.floatX)
import theano
from theano import tensor as T
import numpy as np
def train_linreg(X_train, y_train, eta, epochs):
costs = []
# Initialize arrays
eta0 = T.fscalar('eta0')
y = T.fvector(name='y')
X = T.fmatrix(name='X')
w = theano.shared(np.zeros(
shape=(X_train.shape[1] + 1),
dtype=theano.config.floatX),
name='w')
# calculate cost
net_input = T.dot(X, w[1:]) + w[0]
errors = y - net_input
cost = T.sum(T.pow(errors, 2))
# perform gradient update
gradient = T.grad(cost, wrt=w)
update = [(w, w - eta0 * gradient)]
# compile model
train = theano.function(inputs=[eta0],
outputs=cost,
updates=update,
givens={X: X_train,
y: y_train,})
for _ in range(epochs):
costs.append(train(eta))
return costs, w
%matplotlib inline
import matplotlib.pyplot as plt
costs, w = train_linreg(X_train, y_train, eta=0.001, epochs=10)
plt.plot(range(1, len(costs)+1), costs)
plt.tight_layout()
plt.xlabel('Epoch')
plt.ylabel('Cost')
plt.tight_layout()
# plt.savefig('./figures/cost_convergence.png', dpi=300)
plt.show()
def predict_linreg(X, w):
Xt = T.matrix(name='X')
net_input = T.dot(Xt, w[1:]) + w[0]
predict = theano.function(inputs=[Xt], givens={w: w}, outputs=net_input)
return predict(X)
plt.scatter(X_train, y_train, marker='s', s=50)
plt.plot(range(X_train.shape[0]),
predict_linreg(X_train, w),
color='gray',
marker='o',
markersize=4,
linewidth=3)
plt.xlabel('x')
plt.ylabel('y')
plt.tight_layout()
# plt.savefig('./figures/linreg.png', dpi=300)
plt.show()
# note that the first element (X[0] = 1) denotes the bias unit
X = np.array([[1, 1.4, 1.5]])
w = np.array([0.0, 0.2, 0.4])
def net_input(X, w):
z = X.dot(w)
return z
def logistic(z):
return 1.0 / (1.0 + np.exp(-z))
def logistic_activation(X, w):
z = net_input(X, w)
return logistic(z)
print('P(y=1|x) = %.3f' % logistic_activation(X, w)[0])
# W : array, shape = [n_output_units, n_hidden_units+1]
# Weight matrix for hidden layer -> output layer.
# note that first column (A[:][0] = 1) are the bias units
W = np.array([[1.1, 1.2, 1.3, 0.5],
[0.1, 0.2, 0.4, 0.1],
[0.2, 0.5, 2.1, 1.9]])
# A : array, shape = [n_hidden+1, n_samples]
# Activation of hidden layer.
# note that first element (A[0][0] = 1) is for the bias units
A = np.array([[1.0],
[0.1],
[0.3],
[0.7]])
# Z : array, shape = [n_output_units, n_samples]
# Net input of output layer.
Z = W.dot(A)
y_probas = logistic(Z)
print('Probabilities:\n', y_probas)
y_class = np.argmax(Z, axis=0)
print('predicted class label: %d' % y_class[0])
def softmax(z):
return np.exp(z) / np.sum(np.exp(z))
def softmax_activation(X, w):
z = net_input(X, w)
return softmax(z)  # fixed: this wrapper should apply softmax (sigmoid was undefined here)
y_probas = softmax(Z)
print('Probabilities:\n', y_probas)
y_probas.sum()
y_class = np.argmax(Z, axis=0)
y_class
def tanh(z):
e_p = np.exp(z)
e_m = np.exp(-z)
return (e_p - e_m) / (e_p + e_m)
import matplotlib.pyplot as plt
%matplotlib inline
z = np.arange(-5, 5, 0.005)
log_act = logistic(z)
tanh_act = tanh(z)
# alternatives:
# from scipy.special import expit
# log_act = expit(z)
# tanh_act = np.tanh(z)
plt.ylim([-1.5, 1.5])
plt.xlabel('net input $z$')
plt.ylabel('activation $\phi(z)$')
plt.axhline(1, color='black', linestyle='--')
plt.axhline(0.5, color='black', linestyle='--')
plt.axhline(0, color='black', linestyle='--')
plt.axhline(-1, color='black', linestyle='--')
plt.plot(z, tanh_act,
linewidth=2,
color='black',
label='tanh')
plt.plot(z, log_act,
linewidth=2,
color='lightgreen',
label='logistic')
plt.legend(loc='lower right')
plt.tight_layout()
# plt.savefig('./figures/activation.png', dpi=300)
plt.show()
import os
import struct
import numpy as np
def load_mnist(path, kind='train'):
"""Load MNIST data from `path`"""
labels_path = os.path.join(path,
'%s-labels-idx1-ubyte'
% kind)
images_path = os.path.join(path,
'%s-images-idx3-ubyte'
% kind)
with open(labels_path, 'rb') as lbpath:
magic, n = struct.unpack('>II',
lbpath.read(8))
labels = np.fromfile(lbpath,
dtype=np.uint8)
with open(images_path, 'rb') as imgpath:
magic, num, rows, cols = struct.unpack(">IIII",
imgpath.read(16))
images = np.fromfile(imgpath,
dtype=np.uint8).reshape(len(labels), 784)
return images, labels
X_train, y_train = load_mnist('mnist', kind='train')
print('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))
X_test, y_test = load_mnist('mnist', kind='t10k')
print('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))
import theano
theano.config.floatX = 'float32'
X_train = X_train.astype(theano.config.floatX)
X_test = X_test.astype(theano.config.floatX)
from keras.utils import np_utils
print('First 3 labels: ', y_train[:3])
y_train_ohe = np_utils.to_categorical(y_train)
print('\nFirst 3 labels (one-hot):\n', y_train_ohe[:3])
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import SGD
np.random.seed(1)
model = Sequential()
model.add(Dense(input_dim=X_train.shape[1],
output_dim=50,
init='uniform',
activation='tanh'))
model.add(Dense(input_dim=50,
output_dim=50,
init='uniform',
activation='tanh'))
model.add(Dense(input_dim=50,
output_dim=y_train_ohe.shape[1],
init='uniform',
activation='softmax'))
sgd = SGD(lr=0.001, decay=1e-7, momentum=.9)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.fit(X_train, y_train_ohe,
nb_epoch=50,
batch_size=300,
verbose=1,
validation_split=0.1,
show_accuracy=True)
y_train_pred = model.predict_classes(X_train, verbose=0)
print('First 3 predictions: ', y_train_pred[:3])
train_acc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]
print('Training accuracy: %.2f%%' % (train_acc * 100))
y_test_pred = model.predict_classes(X_test, verbose=0)
test_acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]
print('Test accuracy: %.2f%%' % (test_acc * 100))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As Bayesians, we model the problem as finding the parameter $\theta$ of a Bernoulli distribution given the data. For this, we start with a uniform prior, since with this prior we make the least number of assumptions about $\theta$
Step2: The likelihood of the dataset is given by $\theta^{N_{\mathrm{heads}}} (1 - \theta)^{N_{\mathrm{tails}}} \propto Beta(\theta|N_{\mathrm{heads}} + 1, N_{\mathrm{tails}} + 1)$
Step3: The posterior is, therefore, the product
Step4: If we, instead, had a prior $Beta(2, 5)$ we would have
Step5: We can see the difference between both priors in the following figure
Step6: Notice how, in both cases, the most probable value for $\theta$ is close to the real 0.7 value. We can also see that the posterior in the first case, where the prior is uniform, is equal to the likelihood. On the other hand, if our prior tends to other values of $\theta$, the posterior approaches the likelihood more slowly.
Step7: Sex
Step8: Height
Step9: Weight
Step10: Shoe size
Step11: Predicting
|
<ASSISTANT_TASK:>
Python Code:
# Imports (added): the original snippet used these names without importing them.
from collections import namedtuple
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import bernoulli, beta, norm
coin = bernoulli(0.7)
samples = coin.rvs(20)
num_heads = sum(samples)
num_tails = len(samples) - num_heads
prior_1 = beta(1,1)
likelihood = beta(num_heads+1, num_tails+1)
posterior_1 = beta(num_heads+1, num_tails+1)
prior_2 = beta(2, 5)
posterior_2 = beta(num_heads + 2, num_tails + 5)
colors = sns.color_palette('husl', 10)
fig, ax = plt.subplots(ncols=2, nrows=1, figsize=(16, 4))
thetas = np.linspace(0, 1, 1000)
ax[0].plot(thetas, list(map(prior_1.pdf, thetas)), color=colors[4], label='prior')
ax[0].plot(thetas, list(map(likelihood.pdf, thetas)), color=colors[7], label='likelihood')
ax[0].plot(thetas, list(map(posterior_1.pdf, thetas)), color=colors[0], label='posterior')
ax[0].legend(loc='upper left')
ax[0].set_xlabel('theta')
ax[0].set_ylabel('pdf')
ax[0].set_title('Beta(1,1) prior')
ax[1].plot(thetas, list(map(prior_2.pdf, thetas)), color=colors[4], label='prior')
ax[1].plot(thetas, list(map(likelihood.pdf, thetas)), color=colors[7], label='likelihood')
ax[1].plot(thetas, list(map(posterior_2.pdf, thetas)), color=colors[0], label='posterior')
ax[1].legend(loc='upper left')
ax[1].set_xlabel('theta')
ax[1].set_ylabel('pdf')
ax[1].set_title('Beta(2,5) prior')
plt.suptitle('Different priors with the same data')
plt.show()
people = pd.DataFrame([
['M', 6, 180, 12],
['M', 5.92, 190, 11],
['M', 5.58, 170, 12],
['M', 5.92, 165, 10],
['F', 5, 100, 6],
['F', 5.5, 150, 8],
['F', 5.42, 130, 7],
['F', 5.75, 150, 9]],
columns = ['sex', 'height', 'weight', 'size']
)
people
number_of_people = len(people)
mask_male = people.sex == 'M'
mask_female = people.sex == 'F'
prob_male = sum(mask_male)/number_of_people
prob_female = sum(mask_female)/number_of_people
males_height = people.loc[mask_male, 'height']
males_height_dist = norm(loc=males_height.mean(), scale=males_height.std())
females_height = people.loc[mask_female, 'height']
females_height_dist = norm(loc=females_height.mean(), scale=females_height.std())
colors = sns.color_palette('RdBu', 10)
heights = np.linspace(4, 7, 100)
plt.plot(heights, females_height_dist.pdf(heights), label='female', color=colors[0])
plt.plot(heights, males_height_dist.pdf(heights), label='male', color=colors[-1])
plt.xlabel('Height')
plt.ylabel('PDF')
plt.title('Distribution of heights by sex')
plt.legend()
plt.show()
males_weight = people.loc[mask_male, 'weight']
males_weight_dist = norm(loc=males_weight.mean(), scale=males_weight.std())
females_weight = people.loc[mask_female, 'weight']
females_weight_dist = norm(loc=females_weight.mean(), scale=females_weight.std())
colors = sns.color_palette("RdBu", 10)
weights = np.linspace(60, 220, 100)
plt.plot(weights, females_weight_dist.pdf(weights), label='female', color=colors[0])
plt.plot(weights, males_weight_dist.pdf(weights), label='male', color=colors[-1])
plt.xlabel('Weight')
plt.ylabel('PDF')
plt.title('Distribution of weights by sex')
plt.legend()
plt.show()
males_size = people.loc[mask_male, 'size']
males_size_dist = norm(loc=males_size.mean(), scale=males_size.std())
females_size = people.loc[mask_female, 'size']
females_size_dist = norm(loc=females_size.mean(), scale=females_size.std())
colors = sns.color_palette("RdBu", 10)
sizes = np.linspace(3, 15, 100)
plt.plot(sizes, females_size_dist.pdf(sizes), label='female', color= colors[0])
plt.plot(sizes, males_size_dist.pdf(sizes), label='male', color= colors[-1])
plt.xlabel('Shoe size')
plt.ylabel('PDF')
plt.title('Distribution of shoe size by sex')
plt.legend()
plt.show()
person = namedtuple('person', ['height', 'weight', 'size'])
def male(person):
likelihood = males_height_dist.pdf(person.height) * \
males_weight_dist.pdf(person.weight) * \
males_size_dist.pdf(person.size)
prior = prob_male
return likelihood * prior
def female(person):
likelihood = females_height_dist.pdf(person.height) * \
females_weight_dist.pdf(person.weight) * \
females_size_dist.pdf(person.size)
prior = prob_female
return likelihood * prior
mary = person(5.68, 120, 7.5)
mary_male, mary_female = male(mary), female(mary)
mary_total = mary_male + mary_female
print("The probability of Mary be a male is:", mary_male/mary_total)
print("The probability of Mary be a female is:", mary_female/mary_total)
john = person(7, 200, 14)
john_male, john_female = male(john), female(john)
john_total = john_male + john_female
print("The probability of John be a male is:", john_male/john_total)
print("The probability of John be a female is:", john_female/john_total)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a function to download and save SRTM images using BMI_topography.
Step2: Make function to plot DEMs and drainage accumulation with shaded relief.
Step3: Compare default Landlab flow accumulator with priority flood flow accumulator
Step4: Priority flood flow director/accumulator
Step5: Priority flood flow director/accumulator
|
<ASSISTANT_TASK:>
Python Code:
import sys, time, os
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from landlab.components import FlowAccumulator, PriorityFloodFlowRouter, ChannelProfiler
from landlab.io.netcdf import read_netcdf
from landlab.utils import get_watershed_mask
from landlab import imshowhs_grid, imshow_grid
from landlab.io import read_esri_ascii, write_esri_ascii
from bmi_topography import Topography
def get_topo(buffer, north=40.16, south=40.14, east=-105.49, west=-105.51):
params = Topography.DEFAULT.copy()
params["south"] = south - buffer
params["north"] = north + buffer
params["west"] = -105.51 - buffer
params["east"] = -105.49 + buffer
params["output_format"] = "AAIGrid"
params["cache_dir"] = Path.cwd()
dem = Topography(**params)
name = dem.fetch()
props = dem.load()
dim_x = props.sizes["x"]
dim_y = props.sizes["y"]
cells = props.sizes["x"] * props.sizes["y"]
grid, z = read_esri_ascii(name, name="topographic__elevation")
return dim_x, dim_y, cells, grid, z, dem
def plotting(
grid, topo=True, DA=True, hill_DA=False, flow_metric="D8", hill_flow_metric="Quinn"
):
if topo:
azdeg = 200
altdeg = 20
ve = 1
plt.figure()
plot_type = "DEM"
ax = imshowhs_grid(
grid,
"topographic__elevation",
grid_units=("deg", "deg"),
var_name="Topo, m",
cmap="terrain",
plot_type=plot_type,
vertical_exa=ve,
azdeg=azdeg,
altdeg=altdeg,
default_fontsize=12,
cbar_tick_size=10,
cbar_width="100%",
cbar_or="vertical",
bbox_to_anchor=[1.03, 0.3, 0.075, 14],
colorbar_label_y=-15,
colorbar_label_x=0.5,
ticks_km=False,
)
if DA:
# %% Plot first instance of drainage_area
grid.at_node["drainage_area"][grid.at_node["drainage_area"] == 0] = (
grid.dx * grid.dx
)
plot_DA = np.log10(grid.at_node["drainage_area"] * 111e3 * 111e3)
plt.figure()
plot_type = "Drape1"
drape1 = plot_DA
thres_drape1 = None
alpha = 0.5
myfile1 = "temperature.cpt"
cmap1 = "terrain"
ax = imshowhs_grid(
grid,
"topographic__elevation",
grid_units=("deg", "deg"),
cmap=cmap1,
plot_type=plot_type,
drape1=drape1,
vertical_exa=ve,
azdeg=azdeg,
altdeg=altdeg,
thres_drape1=thres_drape1,
alpha=alpha,
default_fontsize=12,
cbar_tick_size=10,
var_name="$log^{10}DA, m^2$",
cbar_width="100%",
cbar_or="vertical",
bbox_to_anchor=[1.03, 0.3, 0.075, 14],
colorbar_label_y=-15,
colorbar_label_x=0.5,
ticks_km=False,
)
props = dict(boxstyle="round", facecolor="white", alpha=0.6)
textstr = flow_metric
ax.text(
0.05,
0.95,
textstr,
transform=ax.transAxes,
fontsize=10,
verticalalignment="top",
bbox=props,
)
if hill_DA:
# Plot second instance of drainage_area (hill_drainage_area)
grid.at_node["hill_drainage_area"][grid.at_node["hill_drainage_area"] == 0] = (
grid.dx * grid.dx
)
plotDA = np.log10(grid.at_node["hill_drainage_area"] * 111e3 * 111e3)
# plt.figure()
# imshow_grid(grid, plotDA,grid_units=("m", "m"), var_name="Elevation (m)", cmap='terrain')
plt.figure()
plot_type = "Drape1"
# plot_type='Drape2'
drape1 = np.log10(grid.at_node["hill_drainage_area"])
thres_drape1 = None
alpha = 0.5
myfile1 = "temperature.cpt"
cmap1 = "terrain"
ax = imshowhs_grid(
grid,
"topographic__elevation",
grid_units=("deg", "deg"),
cmap=cmap1,
plot_type=plot_type,
drape1=drape1,
vertical_exa=ve,
azdeg=azdeg,
altdeg=altdeg,
thres_drape1=thres_drape1,
alpha=alpha,
default_fontsize=10,
cbar_tick_size=10,
var_name="$log^{10}DA, m^2$",
cbar_width="100%",
cbar_or="vertical",
bbox_to_anchor=[1.03, 0.3, 0.075, 14],
colorbar_label_y=-15,
colorbar_label_x=0.5,
ticks_km=False,
)
props = dict(boxstyle="round", facecolor="white", alpha=0.6)
textstr = hill_flow_metric
ax.text(
0.05,
0.95,
textstr,
transform=ax.transAxes,
fontsize=10,
verticalalignment="top",
bbox=props,
)
# Download or reload topo data with given buffer
dim_x, dim_y, cells, grid_LL, z_LL, dem = get_topo(0.05)
fa_LL = FlowAccumulator(
grid_LL, flow_director="D8", depression_finder="DepressionFinderAndRouter"
)
fa_LL.run_one_step()
# Plot output products
plotting(grid_LL)
# Download or reload topo data with given buffer
dim_x, dim_y, cells, grid_PF, z_PF, dem = get_topo(0.05)
# Here, we only calculate flow directions using the first instance of the flow accumulator
flow_metric = "D8"
fa_PF = PriorityFloodFlowRouter(
grid_PF,
surface="topographic__elevation",
flow_metric=flow_metric,
suppress_out=False,
depression_handler="fill",
accumulate_flow=True,
)
fa_PF.run_one_step()
# Plot output products
plotting(grid_PF)
# 3. Priority flood flow director/accumulator
# Download or reload topo data with given buffer
dim_x, dim_y, cells, grid_PF, z_PF, dem = get_topo(0.05)
# For timing compare only single flow
flow_metric = "D8"
hill_flow_metric = "Quinn"
fa_PF = PriorityFloodFlowRouter(
grid_PF,
surface="topographic__elevation",
flow_metric=flow_metric,
suppress_out=False,
depression_handler="fill",
accumulate_flow=True,
separate_hill_flow=True,
accumulate_flow_hill=True,
update_hill_flow_instantaneous=False,
hill_flow_metric=hill_flow_metric,
)
fa_PF.run_one_step()
fa_PF.update_hill_fdfa()
# 4. Plot output products
plotting(grid_PF, hill_DA=True, flow_metric="D8", hill_flow_metric="Quinn")
# Remove downloaded DEM. Uncomment to remove DEM.
# os.remove(dem.fetch())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fairness Indicators on TF-Hub Text Embeddings
Step2: Import other required libraries.
Step3: Dataset
Step4: By default, the notebook downloads a preprocessed version of this dataset, but you can flip the flag below to download and preprocess the original data instead.
Step5: Create a TensorFlow Model Analysis Pipeline
Step6: Run TFMA & Fairness Indicators
Step7: NNLM
Step8: Universal Sentence Encoder
Step9: Comparing Embeddings
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install -q -U pip==20.2
!pip install fairness-indicators \
"absl-py==0.12.0" \
"pyarrow==2.0.0" \
"apache-beam==2.38.0" \
"avro-python3==1.9.1"
import os
import tempfile
import apache_beam as beam
from datetime import datetime
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis.addons.fairness.view import widget_view
from tensorflow_model_analysis.addons.fairness.post_export_metrics import fairness_indicators
from fairness_indicators import example_model
from fairness_indicators.tutorial_utils import util
BASE_DIR = tempfile.gettempdir()
# The input and output features of the classifier
TEXT_FEATURE = 'comment_text'
LABEL = 'toxicity'
FEATURE_MAP = {
# input and output features
LABEL: tf.io.FixedLenFeature([], tf.float32),
TEXT_FEATURE: tf.io.FixedLenFeature([], tf.string),
# slicing features
'sexual_orientation': tf.io.VarLenFeature(tf.string),
'gender': tf.io.VarLenFeature(tf.string),
'religion': tf.io.VarLenFeature(tf.string),
'race': tf.io.VarLenFeature(tf.string),
'disability': tf.io.VarLenFeature(tf.string)
}
IDENTITY_TERMS = ['gender', 'sexual_orientation', 'race', 'religion', 'disability']
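# Added note: FEATURE_MAP doubles as the feature spec for parsing the TFRecord
# examples, e.g. for a serialized tf.Example string `record` (illustrative):
#   parsed = tf.io.parse_single_example(record, FEATURE_MAP)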
download_original_data = False #@param {type:"boolean"}
if download_original_data:
train_tf_file = tf.keras.utils.get_file('train_tf.tfrecord',
'https://storage.googleapis.com/civil_comments_dataset/train_tf.tfrecord')
validate_tf_file = tf.keras.utils.get_file('validate_tf.tfrecord',
'https://storage.googleapis.com/civil_comments_dataset/validate_tf.tfrecord')
# The identity terms list will be grouped together by their categories
# (see 'IDENTITY_COLUMNS') on threshold 0.5. Only the identity term column,
# text column and label column will be kept after processing.
train_tf_file = util.convert_comments_data(train_tf_file)
validate_tf_file = util.convert_comments_data(validate_tf_file)
else:
train_tf_file = tf.keras.utils.get_file('train_tf_processed.tfrecord',
'https://storage.googleapis.com/civil_comments_dataset/train_tf_processed.tfrecord')
validate_tf_file = tf.keras.utils.get_file('validate_tf_processed.tfrecord',
'https://storage.googleapis.com/civil_comments_dataset/validate_tf_processed.tfrecord')
def embedding_fairness_result(embedding, identity_term='gender'):
model_dir = os.path.join(BASE_DIR, 'train',
datetime.now().strftime('%Y%m%d-%H%M%S'))
print("Training classifier for " + embedding)
classifier = example_model.train_model(model_dir,
train_tf_file,
LABEL,
TEXT_FEATURE,
FEATURE_MAP,
embedding)
# Create a unique path to store the results for this embedding.
embedding_name = embedding.split('/')[-2]
eval_result_path = os.path.join(BASE_DIR, 'eval_result', embedding_name)
example_model.evaluate_model(classifier,
validate_tf_file,
eval_result_path,
identity_term,
LABEL,
FEATURE_MAP)
return tfma.load_eval_result(output_path=eval_result_path)
eval_result_random_nnlm = embedding_fairness_result('https://tfhub.dev/google/random-nnlm-en-dim128/1')
widget_view.render_fairness_indicator(eval_result=eval_result_random_nnlm)
eval_result_nnlm = embedding_fairness_result('https://tfhub.dev/google/nnlm-en-dim128/1')
widget_view.render_fairness_indicator(eval_result=eval_result_nnlm)
eval_result_use = embedding_fairness_result('https://tfhub.dev/google/universal-sentence-encoder/2')
widget_view.render_fairness_indicator(eval_result=eval_result_use)
widget_view.render_fairness_indicator(multi_eval_results={'nnlm': eval_result_nnlm, 'use': eval_result_use})
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: With no units
Step2: The species_attributes section defines a diffusion constant and radius of Species, A, B and C. For example, the diffusion rate of A is 1, and its dimensionality is expected to be [length**2/time]. However, what is the scale? Is it meter? Or mile?
Step3: Introducing units
Step4: First, create your own unit system, ureg, by using ecell4.extra.unit.getUnitRegistry. With this UnitRegistry, you can make a quantity with its unit as ureg.Quantity(value, unit). (Please be careful about the type of Quantity. It looks the same as the Quantity given by pint, but is slightly changed in ecell4, though all the original functionality in pint is available even in ecell4. Please do not use ureg = pint.UnitRegistry().)
Step5: The default base units are meter for [length], second for [time], and item (which means the number of molecules) for [substance]. To change a default base unit, write something like ureg = getUnitRegistry(length='micrometer').
Step6: Now you can provide quantities in any unit regardless of the base units.
Step7: You can operate quantities, and make a new quantity. See https
Step8: In addition to the model creation, run_simulation (and ensemble_simulations) also supports the unit system.
Step9: Even if you change the base units, the behavior of simulations is kept consistent. In the following example, base units are rescaled to micrometer and minute with no change in the modeling section.
Step10: Checking dimensionality
Step11: When checking the dimensionality of units in the model with check_model, no unit may be omitted.
Step12: A kinetic rate constant is verified based on the order of the reaction. A first-order reaction rate should have dimensionality [1/time], and a second-order one should have [1/(substance/length**3)/time] in a volume.
Step13: The dimensionality of a synthetic reaction depends on the dimension to which the products belong.
Step14: A unit of the reaction rate between a molecule and a structure is also tested.
Step15: Additionally, rate law representations accept quantities with a unit too. See the example below
Step16: Here, the reaction above has two quantities, Vmax = Q_(1.0, 'uM/s') and Km = Q_(100, 'nM'). First, Km must have the same dimensionality as S, which is [concentration].
Step17: Secondly, the dimensionality of a rate equation must be [concentration/time]. Therefore, the dimensionality of Vmax should be [concentration/time] too.
Step18: When you give a value with no unit, it is regarded as dimensionless.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from ecell4.prelude import *
with species_attributes():
A | B | C | {'D': 1, 'radius': 0.005}
with reaction_rules():
A + B == C | (0.01, 0.3)
m = get_model()
show(m)
from ecell4.extra.unit import getUnitRegistry
ureg = getUnitRegistry()
Q_ = ureg.Quantity
with species_attributes():
A | B | C | {'D': Q_(1, 'um**2/s'), 'radius': Q_(0.005, 'um')}
with reaction_rules():
A + B == C | (Q_(0.01, '1/(item/um**3)/s'), Q_(0.3, '1/s'))
m = get_model()
show(m)
with species_attributes():
A | B | C | {'D': Q_(1e-8, 'cm**2/s'), 'radius': Q_(5, 'nm')}
with reaction_rules():
A + B == C | (Q_(6.02214129, '1/uM/s'), Q_(18, '1/min'))
m = get_model()
show(m)
volume = Q_(1, 'fL')
conc = Q_(100, 'nM')
print((volume * conc).to('item'))
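# Quantities also carry an inspectable dimensionality (added illustration):
print(Q_(1, 'um**2/s').dimensionality)  # [length] ** 2 / [time]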
run_simulation(Q_(0.1, 'min'), y0={'C': Q_(60, 'item')}, volume=Q_(1, 'fL'), model=m, solver='ode')
ureg = getUnitRegistry(length='micrometer', time='minute')
Q_ = ureg.Quantity
with species_attributes():
A | B | C | {'D': Q_(1e-8, 'cm**2/s'), 'radius': Q_(5, 'nm')}
with reaction_rules():
A + B == C | (Q_(6.02214129, '1/uM/s'), Q_(18, '1/min'))
m = get_model()
show(m)
run_simulation(Q_(0.1, 'min'), y0={'C': Q_(60, 'item')}, volume=Q_(1, 'fL'), model=m, solver='ode')
from ecell4.extra.unit import check_model, DimensionalityMismatchError
ureg = getUnitRegistry()
Q_ = ureg.Quantity
with species_attributes():
A | {'radius': Q_(0.005, 'um'), 'D': Q_(1.0, 'um/s')}
try:
check_model(get_model())
except DimensionalityMismatchError as e:
print('{}: {}'.format(e.__class__.__name__, e))
with species_attributes():
A | {'radius': 0.005, 'D': Q_(1.0, 'um**2/s')}
try:
check_model(get_model())
except DimensionalityMismatchError as e:
print('{}: {}'.format(e.__class__.__name__, e))
with reaction_rules():
A + B > C | Q_(0.3, '1/s')
try:
check_model(get_model())
except DimensionalityMismatchError as e:
print('{}: {}'.format(e.__class__.__name__, e))
with reaction_rules():
~A > A | Q_(0.3, '1/s')
try:
check_model(get_model())
except DimensionalityMismatchError as e:
print('{}: {}'.format(e.__class__.__name__, e))
with species_attributes():
B | {'location': 'M'}
M | {'dimension': 2}
with reaction_rules():
A + M > B | Q_(0.3, '1/s')
try:
check_model(get_model())
except DimensionalityMismatchError as e:
print('{}: {}'.format(e.__class__.__name__, e))
with reaction_rules():
S > P | Q_(1.0, 'uM/s') * S / (Q_(100, 'nM') + S)
check_model(get_model())
with reaction_rules():
S > P | Q_(1.0, 'uM/s') * S / (Q_(100, 'nM/s') + S)
try:
check_model(get_model())
except DimensionalityMismatchError as e:
print('{}: {}'.format(e.__class__.__name__, e))
with reaction_rules():
S > P | Q_(1.0, '1/s') * S / (Q_(100, 'nM') + S)
try:
check_model(get_model())
except DimensionalityMismatchError as e:
print('{}: {}'.format(e.__class__.__name__, e))
with reaction_rules():
S > P | 10.0 * Q_(0.1, 'uM/s') * S**2 / (Q_(100, 'nM')**2 + S**2)
m = get_model()
show(m)
check_model(m)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define peak density function
Step2: Simulate and export data from 10 subjects
Step3: Perform group analysis and extract peaks from Tstat-map
Step4: Plot observed distribution of peaks with theoretical distribution (under H_0)
Step5: Compute p-values based on theoretical distribution (by numerical integration)
Step6: Compute proportion of activation based on BUM model
Step7: Plot histogram of p-values with expected distribution (beta and uniform)
Step8: Apply power procedure WITH threshold
Step10: Adjust power procedure without threshold
Step11: Figures for JSM
Step12: $P(T>t | H_0, t>u)$
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib
% matplotlib inline
import numpy as np
import scipy
import scipy.stats as stats
import scipy.optimize as optimize
import scipy.integrate as integrate
from __future__ import print_function, division
import os
import math
from nipy.labs.utils.simul_multisubject_fmri_dataset import surrogate_3d_dataset
from nipype.interfaces import fsl
import nibabel as nib
import matplotlib.pyplot as plt
import pandas as pd
from palettable.colorbrewer.qualitative import Paired_12
# BUM and neuropower (used below for bumOptim, peakmixmodfit, etc.) are assumed
# to come from the neuropower package or equivalent local helper modules.
from neuropower import BUM, neuropower
os.chdir("/Users/Joke/Documents/Onderzoek/Studie_7_neuropower_improved/WORKDIR/")
def peakdens1D(x,k):
f1 = (3-k**2)**0.5/(6*math.pi)**0.5*np.exp(-3*x**2/(2*(3-k**2)))
f2 = 2*k*x*math.pi**0.5/6**0.5*stats.norm.pdf(x)*stats.norm.cdf(k*x/(3-k**2)**0.5)
out = f1*f2
return out
def peakdens2D(x,k):
f1 = 3**0.5*k**2*(x**2-1)*stats.norm.pdf(x)*stats.norm.cdf(k*x/(2-k**2)**0.5)
f2 = k*x*(3*(2-k**2))**0.5/(2*math.pi) * np.exp(-x**2/(2-k**2))
f31 = 6**0.5/(math.pi*(3-k**2))**0.5*np.exp(-3*x**2/(2*(3-k**2)))
f32 = stats.norm.cdf(k*x/((3-k**2)*(2-k**2))**0.5)
out = f1+f2+f31*f32
return out
def peakdens3D(x,k):
fd1 = 144*stats.norm.pdf(x)/(29*6**(0.5)-36)
fd211 = k**2.*((1.-k**2.)**3. + 6.*(1.-k**2.)**2. + 12.*(1.-k**2.)+24.)*x**2. / (4.*(3.-k**2.)**2.)
fd212 = (2.*(1.-k**2.)**3. + 3.*(1.-k**2.)**2.+6.*(1.-k**2.)) / (4.*(3.-k**2.))
fd213 = 3./2.
fd21 = (fd211 + fd212 + fd213)
fd22 = np.exp(-k**2.*x**2./(2.*(3.-k**2.))) / (2.*(3.-k**2.))**(0.5)
fd23 = stats.norm.cdf(2.*k*x / ((3.-k**2.)*(5.-3.*k**2.))**(0.5))
fd2 = fd21*fd22*fd23
fd31 = (k**2.*(2.-k**2.))/4.*x**2. - k**2.*(1.-k**2.)/2. - 1.
fd32 = np.exp(-k**2.*x**2./(2.*(2.-k**2.))) / (2.*(2.-k**2.))**(0.5)
fd33 = stats.norm.cdf(k*x / ((2.-k**2.)*(5.-3.*k**2.))**(0.5))
fd3 = fd31 * fd32 * fd33
fd41 = (7.-k**2.) + (1-k**2)*(3.*(1.-k**2.)**2. + 12.*(1.-k**2.) + 28.)/(2.*(3.-k**2.))
fd42 = k*x / (4.*math.pi**(0.5)*(3.-k**2.)*(5.-3.*k**2)**0.5)
fd43 = np.exp(-3.*k**2.*x**2/(2.*(5-3.*k**2.)))
fd4 = fd41*fd42 * fd43
fd51 = math.pi**0.5*k**3./4.*x*(x**2.-3.)
f521low = np.array([-10.,-10.])
f521up = np.array([0.,k*x/2.**(0.5)])
f521mu = np.array([0.,0.])
f521sigma = np.array([[3./2., -1.],[-1.,(3.-k**2.)/2.]])
fd521,i = stats.mvn.mvnun(f521low,f521up,f521mu,f521sigma)
f522low = np.array([-10.,-10.])
f522up = np.array([0.,k*x/2.**(0.5)])
f522mu = np.array([0.,0.])
f522sigma = np.array([[3./2., -1./2.],[-1./2.,(2.-k**2.)/2.]])
fd522,i = stats.mvn.mvnun(f522low,f522up,f522mu,f522sigma)
fd5 = fd51*(fd521+fd522)
out = fd1*(fd2+fd3+fd4+fd5)
return out
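# Sanity check (a quick sketch): the 3D peak height density should integrate
# to approximately 1 over its support.
total, _ = integrate.quad(lambda t: peakdens3D(t, 1), -10., 10.)
print("peakdens3D(., k=1) integrates to %.4f" % total)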
smooth_FWHM = 3
smooth_sigma = smooth_FWHM/(2*math.sqrt(2*math.log(2)))
dimensions = (50,50,50)
positions = np.array([[60,40,40],
[40,80,40],
[50,30,60]])
amplitudes = np.array([1.,1.,1.])
width = 5.
seed=123
mask = nib.load("0mask.nii")
nsub=10
noise = surrogate_3d_dataset(n_subj=nsub, shape=dimensions, mask=mask,
sk=smooth_sigma,noise_level=1.0,
width=5.0,out_text_file=None,
out_image_file=None, seed=seed)
signal = surrogate_3d_dataset(n_subj=nsub, shape=dimensions, mask=mask,
sk=smooth_sigma,noise_level=0.0, pos=positions,
ampli=amplitudes, width=10.0,out_text_file=None,
out_image_file=None, seed=seed)
low_values_indices = signal < 0.1
signal[low_values_indices] = 0
high_values_indices = signal > 0
signal[high_values_indices] = 1
data = noise+signal
fig,axs=plt.subplots(1,3,figsize=(13,3))
fig.subplots_adjust(hspace = .5, wspace=0.3)
axs=axs.ravel()
axs[0].imshow(noise[1,:,:,40])
axs[1].imshow(signal[1,:,:,40])
axs[2].imshow(data[1,:,:,40])
fig.show()
data = data.transpose((1,2,3,0))
img=nib.Nifti1Image(data,np.eye(4))
img.to_filename(os.path.join("simulated_dataset.nii.gz"))
model=fsl.L2Model(num_copes=nsub)
model.run()
flameo=fsl.FLAMEO(cope_file='simulated_dataset.nii.gz',
cov_split_file='design.grp',
design_file='design.mat',
t_con_file='design.con',
mask_file='0mask.nii',
run_mode='ols',
terminal_output='none')
flameo.run()
from StringIO import StringIO # This is for reading a string into a pandas df
import tempfile
import shutil
tstat = nib.load("stats/tstat1.nii.gz").get_data()
minimum = np.nanmin(tstat)
newdata = tstat - minimum #little trick because fsl.model.Cluster ignores negative values
img=nib.Nifti1Image(newdata,np.eye(4))
img.to_filename(os.path.join("tstat1_allpositive.nii.gz"))
input_file = os.path.join("tstat1_allpositive.nii.gz")
# 0) Creating a temporary directory for the temporary file to save the local cluster file
tmppath = tempfile.mkdtemp()
# 1) Running the command and saving output to screen into df
cmd = "cluster -i %s --thresh=0 --num=10000 --olmax=%s/locmax.txt --connectivity=26" %(input_file,tmppath)
output = StringIO(os.popen(cmd).read()) # the command's screen output, which can be read into a pandas data frame below
df = pd.DataFrame.from_csv(output, sep="\t", parse_dates=False)
df
# 2) Now let's read in the temporary file, and delete the directory and everything in it
peaks = pd.read_csv("%s/locmax.txt" %tmppath,sep="\t").drop('Unnamed: 5',1)
peaks.Value = peaks.Value + minimum
shutil.rmtree(tmppath)
peaks[:5]
xn = np.arange(-10,10,0.01)
yn = []
for x in xn:
yn.append(peakdens3D(x,1))
twocol = Paired_12.mpl_colors
plt.figure(figsize=(7,5))
plt.hist(peaks.Value,lw=0,facecolor=twocol[0],normed=True,bins=np.arange(-5,10,0.3),label="observed distribution")
plt.xlim([-1,10])
plt.ylim([0,0.6])
plt.plot(xn,yn,color=twocol[1],lw=3,label="theoretical distribution under H_0")
plt.title("histogram")
plt.xlabel("peak height")
plt.ylabel("density")
plt.legend(loc="upper left",frameon=False)
plt.show()
y = []
for x in peaks.Value:
y.append(1-integrate.quad(lambda x: peakdens3D(x,1), -20, x)[0])
ynew = [10**(-6) if x<10**(-6) else x for x in y]
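# Clipping tiny p-values at 1e-6 avoids exact zeros, which would cause numerical
# problems (e.g. log(0)) in the BUM fit below.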
peaks['P'] = ynew  # column assignment; attribute-style assignment (peaks.P = ...) would not create a new column
bum = BUM.bumOptim(peaks.P,starts=100)
bum["pi1"]
twocol = Paired_12.mpl_colors
plt.figure(figsize=(7,5))
plt.hist(peaks.P,lw=0,facecolor=twocol[0],normed=True,bins=np.arange(0,1,0.1),label="observed distribution")
plt.hlines(1-bum["pi1"],0,1,color=twocol[1],lw=3,label="null part of distribution")
plt.plot(xn,stats.beta.pdf(xn,bum["a"],1)+1-bum["pi1"],color=twocol[3],lw=3,label="alternative part of distribution")
plt.xlim([0,1])
plt.ylim([0,4])
plt.title("histogram")
plt.xlabel("peak height")
plt.ylabel("density")
plt.legend(loc="upper right",frameon=False)
plt.show()
powerthres = neuropower.peakmixmodfit(peaks.Value[peaks.Value>3],bum["pi1"],3)
print(powerthres["mu"])
print(powerthres["sigma"])
twocol = Paired_12.mpl_colors
plt.figure(figsize=(7,5))
plt.hist(peaks.Value[peaks.Value>3],lw=0,facecolor=twocol[0],normed=True,bins=np.arange(3,10,0.3),label="observed distribution")
plt.xlim([3,10])
plt.ylim([0,1])
plt.plot(xn,neuropower.nulprobdens(3,xn)*(1-bum["pi1"]),color=twocol[3],lw=3,label="null distribution")
plt.plot(xn,neuropower.altprobdens(powerthres["mu"],powerthres["sigma"],3,xn)*(bum["pi1"]),color=twocol[5],lw=3, label="alternative distribution")
plt.plot(xn,neuropower.mixprobdens(powerthres["mu"],powerthres["sigma"],bum["pi1"],3,xn),color=twocol[1],lw=3,label="total distribution")
plt.title("histogram")
plt.xlabel("peak height")
plt.ylabel("density")
plt.legend(loc="upper right",frameon=False)
plt.show()
def altprobdens(mu,sigma,peaks):
out = scipy.stats.norm(mu,sigma).pdf(peaks)
return out
def mixprobdens(mu,sigma,pi1,peaks):
f0=[(1-pi1)*peakdens3D(p,1) for p in peaks]
fa=[pi1*altprobdens(mu,sigma,p) for p in peaks]
f=[x + y for x, y in zip(f0, fa)]
return(f)
def mixprobdensSLL(pars,pi1,peaks):
mu=pars[0]
sigma=pars[1]
f = mixprobdens(mu,sigma,pi1,peaks)
LL = -sum(np.log(f))
return(LL)
def nothrespeakmixmodfit(peaks,pi1):
    """Searches the maximum likelihood estimator for the mixture distribution of null and alternative."""
start = [5,0.5]
opt = scipy.optimize.minimize(mixprobdensSLL,start,method='L-BFGS-B',args=(pi1,peaks),bounds=((2.5,50),(0.1,50)))
out={'maxloglikelihood': opt.fun,
'mu': opt.x[0],
'sigma': opt.x[1]}
return out
modelfit = nothrespeakmixmodfit(peaks.Value,bum["pi1"])
twocol = Paired_12.mpl_colors
plt.figure(figsize=(7,5))
plt.hist(peaks.Value,lw=0,facecolor=twocol[0],normed=True,bins=np.arange(-2,10,0.3),label="observed distribution")
plt.xlim([-2,10])
plt.ylim([0,0.5])
plt.plot(xn,[(1-bum["pi1"])*peakdens3D(p,1) for p in xn],color=twocol[3],lw=3,label="null distribution")
plt.plot(xn,bum["pi1"]*altprobdens(modelfit["mu"],modelfit["sigma"],xn),color=twocol[5],lw=3,label="alternative distribution")
plt.plot(xn,mixprobdens(modelfit["mu"],modelfit["sigma"],bum["pi1"],xn),color=twocol[1],lw=3,label="fitted distribution")
plt.title("histogram")
plt.xlabel("peak height")
plt.ylabel("density")
plt.legend(loc="upper right",frameon=False)
plt.show()
xn = np.arange(-10,10,0.01)
newcol = ["#8C1515","#4D4F53","#000000","#B3995D"]
plt.figure(figsize=(5,3))
plt.xlim([1.7,7.8])
plt.ylim([0,2])
k = -1
for u in range(2,6):
k = k+1
print(k)
plt.plot(xn,neuropower.nulprobdens(u,xn),color=newcol[k],lw=3,label="u=%s" %(u))
plt.vlines(u,0,2,color=newcol[k],lw=1,linestyle="--")
plt.legend(loc="upper right",frameon=False)
plt.show()
plt.figure(figsize=(5,3))
plt.hlines(1-0.30,0,1,color=newcol[1],lw=3,label="null distribution")
plt.plot(xn,stats.beta.pdf(xn,0.2,1)+1-0.3,color=newcol[0],lw=3,label="alternative distribution")
plt.xlim([0,1])
plt.ylim([0,4])
plt.title("")
plt.xlabel("")
plt.ylabel("")
plt.legend(loc="upper right",frameon=False)
plt.show()
plt.figure(figsize=(5,3))
plt.xlim([2,6])
plt.ylim([0,1])
plt.plot(xn,neuropower.nulprobdens(2,xn)*0.3,color=newcol[3],lw=3,label="null distribution")
plt.plot(xn,neuropower.altprobdens(3,1,2,xn)*0.7,color=newcol[1],lw=3, label="alternative distribution")
plt.plot(xn,neuropower.mixprobdens(3,1,0.7,2,xn),color=newcol[0],lw=3,label="total distribution")
plt.title("")
plt.xlabel("")
plt.ylabel("")
plt.legend(loc="upper right",frameon=False)
plt.show()
y1 = []
ran = range(10,51)
for n in ran:
delta = 3/10**0.5
new = delta*n**0.5
y1.append(1-neuropower.altcumdens(new,1,2,4))
plt.figure(figsize=(5,3))
plt.plot(ran,y1,color=newcol[0],lw=3)
plt.xlim([10,np.max(ran)])
plt.ylim([0,1])
plt.title("")
plt.xlabel("")
plt.ylabel("")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 0. Start from scratch
Step2: 1. Load a network from file / URL
Step3: 2. Get the current network view as a PNG image (embedded)
Step4: 3. Get list of available layout algorithms
Step5: 4. Apply layout
Step6: 5. Get all Visual Style Names
Step7: 6. Apply a Visual Style to a network view
Step8: 7. Get all details of a Visual Style
Step9: 8. Get all default values of a Visual Style
Step10: 9. Get all mappings for a Visual Style
Step11: 10. Change default value(s) of a Visual Style
Step12: 11. Get network view object IDs
Step13: 12. Get a view object in Cytoscape.js format
Step14: 13. Set value(s) to a view object
Step15: 14. Select/ Deselect a node
Step16: 15. Set a value to the entire column
Step17: 16. Create Visual Mappings
Step18: 17. Select nodes or edges by query
Step19: 18. Upload new values to table
Step20: Advanced Features
Step21: Get and Set GUI Panel States
|
<ASSISTANT_TASK:>
Python Code:
import requests
import json
from IPython.display import display
from IPython.display import Image
# Basic Setup
PORT_NUMBER = 1234
BASE = 'http://localhost:' + str(PORT_NUMBER) + '/v1/'
HEADERS = {'Content-Type': 'application/json'}
# Utility function to print result (JSON Printer)
def jp(data):
print(json.dumps(data, indent=4))
res = requests.delete(BASE + 'session')
jp(res.json())
# URL Parameters
url_params = {
'source': 'url',
'collection': 'Your Collection Name'
}
# Array of data source. URL of the file (remote or local)
network_files = [
'http://chianti.ucsd.edu/cytoscape-data/galFiltered.sif'
# And of course, you can add as many files as you need...
]
# Load network from URLs
res = requests.post(BASE + 'networks', params=url_params, data=json.dumps(network_files), headers=HEADERS)
jp(res.json())
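# Optional sanity check (a sketch; cyREST exposes GET /v1/networks/count):
count_res = requests.get(BASE + 'networks/count')
jp(count_res.json())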
suid = res.json()[0]['networkSUID'][0]
# Make a utility to get first SUID
def get_suid(response):
    return response.json()[0]['networkSUID'][0]  # use the argument rather than the global res
# Let's make a utility function
def show(network_id):
url = BASE+'networks/' + str(network_id) + '/views/first.png'
print('Your image is available here: ' + url)
display(Image(url=url, embed=True))
# Call it!
show(suid)
res = requests.get(BASE + 'apply/layouts')
jp(res.json())
res = requests.get(BASE + 'apply/layouts/force-directed/' + str(suid))
show(suid)
res = requests.get(BASE + 'styles')
jp(res.json())
res = requests.get(BASE + 'apply/styles/Directed/' + str(suid))
show(suid)
res = requests.get(BASE + 'styles/Directed')
#jp(res.json())
res = requests.get(BASE + 'styles/Directed/defaults')
#jp(res.json())
res = requests.get(BASE + 'styles/Directed/mappings')
jp(res.json())
# Simply define a key-value pairs of visual properties
new_defaults = [
{
'visualProperty': 'EDGE_WIDTH',
'value': 12
},
{
'visualProperty': 'EDGE_STROKE_UNSELECTED_PAINT',
'value': '#00abff'
}
]
res = requests.put(BASE + 'styles/Directed/defaults', data=json.dumps(new_defaults), headers=HEADERS)
show(suid)
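# Verify one of the new defaults (a sketch; cyREST exposes
# GET /v1/styles/{name}/defaults/{visualProperty}):
res = requests.get(BASE + 'styles/Directed/defaults/EDGE_WIDTH')
jp(res.json())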
res = requests.get(BASE + 'networks/' + str(suid) + '/views')
jp(res.json())
view_id = res.json()[0]
view_url = BASE + 'networks/' + str(suid) + '/views/' + str(view_id)
res = requests.get(view_url)
cyjs_network = res.json()
# jp(cyjs_network)
# Pick a node from the network
node_suid = cyjs_network['elements']['nodes'][0]['data']['SUID']
new_values = [
{
'visualProperty': 'NODE_WIDTH',
'value': 300
},
{
'visualProperty': 'NODE_HEIGHT',
'value': 300
},
{
'visualProperty': 'NODE_FILL_COLOR',
'value': 'orange'
},
{
'visualProperty': 'NODE_SHAPE',
'value': 'diamond'
}
]
res = requests.put(view_url + '/nodes/' + str(node_suid), data=json.dumps(new_values), headers=HEADERS)
show(suid)
# Update network view values
scale_url = view_url + '/network/NETWORK_SCALE_FACTOR'
res = requests.get(scale_url)
print(scale_url)
jp(res.json())
new_values = [
{
'visualProperty': 'NETWORK_BACKGROUND_PAINT',
'value': '#aaaaaa'
},
{
'visualProperty': 'NETWORK_SCALE_FACTOR',
'value': 2.7
}
]
res = requests.put(view_url + '/network', data=json.dumps(new_values), headers=HEADERS)
# Essentially, this is just a new value in selection
new_values = [
{
'SUID': node_suid,
'value': True
}
]
res = requests.put(BASE + 'networks/' + str(suid) + '/tables/defaultnode/columns/selected', data=json.dumps(new_values), headers=HEADERS)
show(suid)
# Deselect
new_values = [
{
'SUID': node_suid,
'value': False
}
]
res = requests.put(BASE + 'networks/' + str(suid) + '/tables/defaultnode/columns/selected', data=json.dumps(new_values), headers=HEADERS)
show(suid)
res = requests.put(BASE + 'networks/' + str(suid) + '/tables/defaultnode/columns/selected?default=true', data={}, headers=HEADERS)
show(suid)
# Step 16 (Create Visual Mappings) was left as TODO in the original; a sketch follows.
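# Minimal sketch (assuming cyREST's POST /v1/styles/{name}/mappings schema):
# a passthrough mapping that uses each node's "name" column as its label.
mappings = [
    {
        'mappingType': 'passthrough',
        'mappingColumn': 'name',
        'mappingColumnType': 'String',
        'visualProperty': 'NODE_LABEL'
    }
]
res = requests.post(BASE + 'styles/Directed/mappings', data=json.dumps(mappings), headers=HEADERS)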
matched_url = BASE + 'networks/' +str(suid) + '/nodes?column=selected&query=true'
res = requests.get(matched_url)
print(matched_url)
# This is an array
result = res.json()
print('Number of selected nodes = ' + str(len(result)))
# Prepare new table data. This format is a bit redundant, but is a standard JSON way to store values.
new_table_data = {
'key': 'name', # Key in the existing table. In this case, "name" column in default node table.
    'dataKey': 'uniprot_id', # Mapping key in the new data. If this value matches the "name" value, the data will be assigned to that row.
'data': [
{
'uniprot_id': 'YBR190W',
'sgd':'S000000394',
'description': 'Dubious open reading frame',
'molecular_weight': 11036.8
}, {
'uniprot_id': 'YOL059W',
'sgd':'S000005420',
'description': 'NAD-dependent glycerol 3-phosphate dehydrogenase',
'molecular_weight': 49418.2
}
]
}
# PUT the new data to default node table
res = requests.put(BASE + 'networks/' + str(suid) + '/tables/defaultnode', data=json.dumps(new_table_data), headers=HEADERS)
res = requests.get(BASE + 'apply/layouts/force-directed')
jp(res.json())
params= [
{
'name': 'defaultNodeMass',
'value': 10
},
{
'name': 'defaultSpringLength',
'value': 100
},
{
'name': 'isDeterministic',
'value': True
}
]
res = requests.put(BASE + 'apply/layouts/force-directed', data=json.dumps(params), headers=HEADERS)
res = requests.get(BASE + 'ui/panels')
jp(res.json())
panel_state = [
{
"state": "DOCK",
"name": "SOUTH"
},
{
"state": "FLOAT",
"name": "SOUTH_WEST"
}
]
res = requests.put(BASE + 'ui/panels', data=json.dumps(panel_state), headers=HEADERS)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using if-elif for discrete classification
Step2: Test your statement a few times so that you see if it works for various numbers.
|
<ASSISTANT_TASK:>
Python Code:
# These are your stellar temperatures, you're welcome!
temp = [5809, 16589, 4698, 1869, 37809, 8634]
# Fill in the parentheses. Don't forget indentation!
n = random_number(50,250) # this should be given!
if ():
#print statement here
elif ():
#print statement here
else:
#print statement here
# Define your loop here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare Data
Step2: Define Network
Step3: Train Network
Step4: Evaluate Network
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
from sklearn.metrics import accuracy_score, confusion_matrix
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
import os
import shutil
%matplotlib inline
DATA_DIR = "../../data"
MODEL_FILE = os.path.join(DATA_DIR, "torch-10-cumsum-predict-{:d}.model")
TRAIN_SIZE = 7500
VAL_SIZE = 100
TEST_SIZE = 500
SEQ_LENGTH = 10
EMBED_SIZE = 1
BATCH_SIZE = 32
NUM_EPOCHS = 10
LEARNING_RATE = 1e-3
def generate_sequence(seq_len):
xs = np.random.random(seq_len)
ys = np.array([0 if x < 2.5 else 1 for x in np.cumsum(xs).tolist()])
return xs, ys
X, Y = generate_sequence(SEQ_LENGTH)
print(X)
print(Y)
def generate_data(seq_len, num_seqs):
xseq, yseq = [], []
for i in range(num_seqs):
X, Y = generate_sequence(seq_len)
xseq.append(X)
yseq.append(Y)
return np.expand_dims(np.array(xseq), axis=2), np.array(yseq)
Xtrain, Ytrain = generate_data(SEQ_LENGTH, TRAIN_SIZE)
Xval, Yval = generate_data(SEQ_LENGTH, VAL_SIZE)
Xtest, Ytest = generate_data(SEQ_LENGTH, TEST_SIZE)
print(Xtrain.shape, Ytrain.shape, Xval.shape, Yval.shape, Xtest.shape, Ytest.shape)
class CumSumPredictor(nn.Module):
def __init__(self, seq_len, input_dim, hidden_dim, output_dim):
super(CumSumPredictor, self).__init__()
self.seq_len = seq_len
self.hidden_dim = hidden_dim
self.output_dim = output_dim
# network layers
self.enc_lstm = nn.LSTM(input_dim, hidden_dim, 1, batch_first=True,
bidirectional=True)
self.fcn = nn.Linear(hidden_dim * 2, output_dim) # bidirectional input
self.fcn_relu = nn.ReLU()
self.fcn_softmax = nn.Softmax()
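        # Note: nn.CrossEntropyLoss (used for training below) applies log-softmax
        # internally, so the explicit Softmax here is redundant for the loss; it is
        # kept to match the original notebook's behavior.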
def forward(self, x):
if torch.cuda.is_available():
h = (Variable(torch.randn(2, x.size(0), self.hidden_dim).cuda()),
Variable(torch.randn(2, x.size(0), self.hidden_dim).cuda()))
else:
h = (Variable(torch.randn(2, x.size(0), self.hidden_dim)),
Variable(torch.randn(2, x.size(0), self.hidden_dim)))
x, h = self.enc_lstm(x, h) # encoder LSTM
        x_fcn = Variable(torch.zeros(x.size(0), self.seq_len, self.output_dim))
        if torch.cuda.is_available():
            x_fcn = x_fcn.cuda()  # keep the output buffer on the same device as x
for i in range(self.seq_len): # decoder LSTM -> fcn for each timestep
x_fcn[:, i, :] = self.fcn_softmax(self.fcn_relu(self.fcn(x[:, i, :])))
x = x_fcn
return x
model = CumSumPredictor(SEQ_LENGTH, EMBED_SIZE, 50, 2)
if torch.cuda.is_available():
model.cuda()
print(model)
# size debugging
print("--- size debugging ---")
inp = Variable(torch.randn(BATCH_SIZE, SEQ_LENGTH, EMBED_SIZE))
outp = model(inp)
print(outp.size())
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
def compute_accuracy(pred_var, true_var):
if torch.cuda.is_available():
ypred = pred_var.cpu().data.numpy()
ytrue = true_var.cpu().data.numpy()
else:
ypred = pred_var.data.numpy()
ytrue = true_var.data.numpy()
pred_nums, true_nums = [], []
for i in range(pred_var.size(0)): # for each row of output
pred_nums.append(int("".join([str(x) for x in ypred[i].tolist()]), 2))
true_nums.append(int("".join([str(x) for x in ytrue[i].tolist()]), 2))
return pred_nums, true_nums, accuracy_score(pred_nums, true_nums)
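# Quick self-check of compute_accuracy (a sketch): both rows decode to the binary
# number 0b10 = 2, so the reported accuracy should be 1.0.
_p = Variable(torch.LongTensor([[1, 0]]))
_t = Variable(torch.LongTensor([[1, 0]]))
print(compute_accuracy(_p, _t))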
history = []
for epoch in range(NUM_EPOCHS):
num_batches = Xtrain.shape[0] // BATCH_SIZE
shuffled_indices = np.random.permutation(np.arange(Xtrain.shape[0]))
train_loss, train_acc = 0., 0.
for bid in range(num_batches):
# extract one batch of data
Xbatch_data = Xtrain[shuffled_indices[bid * BATCH_SIZE : (bid + 1) * BATCH_SIZE]]
Ybatch_data = Ytrain[shuffled_indices[bid * BATCH_SIZE : (bid + 1) * BATCH_SIZE]]
Xbatch = Variable(torch.from_numpy(Xbatch_data).float())
Ybatch = Variable(torch.from_numpy(Ybatch_data).long())
if torch.cuda.is_available():
Xbatch = Xbatch.cuda()
Ybatch = Ybatch.cuda()
# initialize gradients
optimizer.zero_grad()
# forward
loss = 0.
Ybatch_ = model(Xbatch)
for i in range(Ybatch.size(1)):
loss += loss_fn(Ybatch_[:, i, :], Ybatch[:, i])
# backward
loss.backward()
train_loss += loss.data[0]
_, ybatch_ = Ybatch_.max(2)
_, _, acc = compute_accuracy(ybatch_, Ybatch)
train_acc += acc
optimizer.step()
# compute training loss and accuracy
train_loss /= num_batches
train_acc /= num_batches
# compute validation loss and accuracy
val_loss, val_acc = 0., 0.
num_val_batches = Xval.shape[0] // BATCH_SIZE
for bid in range(num_val_batches):
# data
Xbatch_data = Xval[bid * BATCH_SIZE : (bid + 1) * BATCH_SIZE]
Ybatch_data = Yval[bid * BATCH_SIZE : (bid + 1) * BATCH_SIZE]
Xbatch = Variable(torch.from_numpy(Xbatch_data).float())
Ybatch = Variable(torch.from_numpy(Ybatch_data).long())
if torch.cuda.is_available():
Xbatch = Xbatch.cuda()
Ybatch = Ybatch.cuda()
loss = 0.
Ybatch_ = model(Xbatch)
for i in range(Ybatch.size(1)):
loss += loss_fn(Ybatch_[:, i, :], Ybatch[:, i])
val_loss += loss.data[0]
_, ybatch_ = Ybatch_.max(2)
_, _, acc = compute_accuracy(ybatch_, Ybatch)
val_acc += acc
val_loss /= num_val_batches
val_acc /= num_val_batches
torch.save(model.state_dict(), MODEL_FILE.format(epoch+1))
print("Epoch {:2d}/{:d}: loss={:.3f}, acc={:.3f}, val_loss={:.3f}, val_acc={:.3f}"
.format((epoch+1), NUM_EPOCHS, train_loss, train_acc, val_loss, val_acc))
history.append((train_loss, val_loss, train_acc, val_acc))
losses = [x[0] for x in history]
val_losses = [x[1] for x in history]
accs = [x[2] for x in history]
val_accs = [x[3] for x in history]
plt.subplot(211)
plt.title("Accuracy")
plt.plot(accs, color="r", label="train")
plt.plot(val_accs, color="b", label="valid")
plt.legend(loc="best")
plt.subplot(212)
plt.title("Loss")
plt.plot(losses, color="r", label="train")
plt.plot(val_losses, color="b", label="valid")
plt.legend(loc="best")
plt.tight_layout()
plt.show()
saved_model = CumSumPredictor(SEQ_LENGTH, EMBED_SIZE, 50, 2)
saved_model.load_state_dict(torch.load(MODEL_FILE.format(NUM_EPOCHS)))
if torch.cuda.is_available():
saved_model.cuda()
ylabels, ypreds = [], []
num_test_batches = Xtest.shape[0] // BATCH_SIZE
for bid in range(num_test_batches):
Xbatch_data = Xtest[bid * BATCH_SIZE : (bid + 1) * BATCH_SIZE]
Ybatch_data = Ytest[bid * BATCH_SIZE : (bid + 1) * BATCH_SIZE]
Xbatch = Variable(torch.from_numpy(Xbatch_data).float())
Ybatch = Variable(torch.from_numpy(Ybatch_data).long())
if torch.cuda.is_available():
Xbatch = Xbatch.cuda()
Ybatch = Ybatch.cuda()
Ybatch_ = saved_model(Xbatch)
_, ybatch_ = Ybatch_.max(2)
pred_nums, true_nums, _ = compute_accuracy(ybatch_, Ybatch)
ylabels.extend(true_nums)
ypreds.extend(pred_nums)
print("Test accuracy: {:.3f}".format(accuracy_score(ylabels, ypreds)))
Xbatch_data = Xtest[0:10]
Ybatch_data = Ytest[0:10]
Xbatch = Variable(torch.from_numpy(Xbatch_data).float())
Ybatch = Variable(torch.from_numpy(Ybatch_data).long())
if torch.cuda.is_available():
Xbatch = Xbatch.cuda()
Ybatch = Ybatch.cuda()
Ybatch_ = saved_model(Xbatch)
_, ybatch_ = Ybatch_.max(2)
if torch.cuda.is_available():
ybatch__data = ybatch_.cpu().data.numpy()
else:
ybatch__data = ybatch_.data.numpy()
for i in range(Ybatch_data.shape[0]):
label = Ybatch_data[i]
pred = ybatch__data[i]
correct = "True" if np.array_equal(label, pred) else "False"
print("y={:s}, yhat={:s}, correct={:s}".format(str(label), str(pred), correct))
for i in range(NUM_EPOCHS):
os.remove(MODEL_FILE.format(i + 1))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'awi', 'sandbox-1', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Recurrent Neural Networks
Step2: Vanilla RNN
Step3: Vanilla RNN
Step4: Vanilla RNN
Step5: Word embedding
Step6: Word embedding
Step7: Inline Question
Step8: Temporal Softmax loss
Step9: Inline Question
Step10: Run the following cell to perform numeric gradient checking on the TestRNN class; you should see errors around 1e-7 or less.
Step11: LSTM
Step12: LSTM
Step13: LSTM
Step14: LSTM
Step15: LSTM model
Step16: Let's have some fun!!
Step17: Simply run the following code to construct the training dataset.
Step18: We defined a LanguageModelRNN class for you to fill in the TODO block in rnn.py.
Step19: Simply run the following code block to check the loss and accuracy curve.
Step20: Now you can generate text using the trained model. You can also start from a specific word in the original text. If you trained your model on "The Fox", you can check how well it is modeled by starting from "dog", "cat", etc.
|
<ASSISTANT_TASK:>
Python Code:
from lib.rnn import *
from lib.layer_utils import *
from lib.grad_check import *
from lib.optim import *
from lib.train import *
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
N, D, H = 3, 10, 4
rnn = VanillaRNN(D, H, init_scale=0.02, name="rnn_test")
x = np.linspace(-0.4, 0.7, num=N*D).reshape(N, D)
prev_h = np.linspace(-0.2, 0.5, num=N*H).reshape(N, H)
rnn.params[rnn.wx_name] = np.linspace(-0.1, 0.9, num=D*H).reshape(D, H)
rnn.params[rnn.wh_name] = np.linspace(-0.3, 0.7, num=H*H).reshape(H, H)
rnn.params[rnn.b_name] = np.linspace(-0.2, 0.4, num=H)
next_h, _ = rnn.step_forward(x, prev_h)
expected_next_h = np.asarray([
[-0.58172089, -0.50182032, -0.41232771, -0.31410098],
[ 0.66854692, 0.79562378, 0.87755553, 0.92795967],
[ 0.97934501, 0.99144213, 0.99646691, 0.99854353]])
print('next_h error: ', rel_error(expected_next_h, next_h))
np.random.seed(599)
N, D, H = 4, 5, 6
rnn = VanillaRNN(D, H, init_scale=0.02, name="rnn_test")
x = np.random.randn(N, D)
h = np.random.randn(N, H)
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.random.randn(H)
rnn.params[rnn.wx_name] = Wx
rnn.params[rnn.wh_name] = Wh
rnn.params[rnn.b_name] = b
out, meta = rnn.step_forward(x, h)
dnext_h = np.random.randn(*out.shape)
dx_num = eval_numerical_gradient_array(lambda x: rnn.step_forward(x, h)[0], x, dnext_h)
dprev_h_num = eval_numerical_gradient_array(lambda h: rnn.step_forward(x, h)[0], h, dnext_h)
dWx_num = eval_numerical_gradient_array(lambda Wx: rnn.step_forward(x, h)[0], Wx, dnext_h)
dWh_num = eval_numerical_gradient_array(lambda Wh: rnn.step_forward(x, h)[0], Wh, dnext_h)
db_num = eval_numerical_gradient_array(lambda b: rnn.step_forward(x, h)[0], b, dnext_h)
dx, dprev_h, dWx, dWh, db = rnn.step_backward(dnext_h, meta)
print('dx error: ', rel_error(dx_num, dx))
print('dprev_h error: ', rel_error(dprev_h_num, dprev_h))
print('dWx error: ', rel_error(dWx_num, dWx))
print('dWh error: ', rel_error(dWh_num, dWh))
print('db error: ', rel_error(db_num, db))
N, T, D, H = 2, 3, 4, 5
rnn = VanillaRNN(D, H, init_scale=0.02, name="rnn_test")
x = np.linspace(-0.1, 0.3, num=N*T*D).reshape(N, T, D)
h0 = np.linspace(-0.3, 0.1, num=N*H).reshape(N, H)
Wx = np.linspace(-0.2, 0.4, num=D*H).reshape(D, H)
Wh = np.linspace(-0.4, 0.1, num=H*H).reshape(H, H)
b = np.linspace(-0.7, 0.1, num=H)
rnn.params[rnn.wx_name] = Wx
rnn.params[rnn.wh_name] = Wh
rnn.params[rnn.b_name] = b
h = rnn.forward(x, h0)
expected_h = np.asarray([
[
[-0.42070749, -0.27279261, -0.11074945, 0.05740409, 0.22236251],
[-0.39525808, -0.22554661, -0.0409454, 0.14649412, 0.32397316],
[-0.42305111, -0.24223728, -0.04287027, 0.15997045, 0.35014525],
],
[
[-0.55857474, -0.39065825, -0.19198182, 0.02378408, 0.23735671],
[-0.27150199, -0.07088804, 0.13562939, 0.33099728, 0.50158768],
[-0.51014825, -0.30524429, -0.06755202, 0.17806392, 0.40333043]]])
print('h error: ', rel_error(expected_h, h))
np.random.seed(599)
N, D, T, H = 2, 3, 10, 5
rnn = VanillaRNN(D, H, init_scale=0.02, name="rnn_test")
x = np.random.randn(N, T, D)
h0 = np.random.randn(N, H)
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.random.randn(H)
rnn.params[rnn.wx_name] = Wx
rnn.params[rnn.wh_name] = Wh
rnn.params[rnn.b_name] = b
out = rnn.forward(x, h0)
dout = np.random.randn(*out.shape)
dx, dh0 = rnn.backward(dout)
dx_num = eval_numerical_gradient_array(lambda x: rnn.forward(x, h0), x, dout)
dh0_num = eval_numerical_gradient_array(lambda h0: rnn.forward(x, h0), h0, dout)
dWx_num = eval_numerical_gradient_array(lambda Wx: rnn.forward(x, h0), Wx, dout)
dWh_num = eval_numerical_gradient_array(lambda Wh: rnn.forward(x, h0), Wh, dout)
db_num = eval_numerical_gradient_array(lambda b: rnn.forward(x, h0), b, dout)
dWx = rnn.grads[rnn.wx_name]
dWh = rnn.grads[rnn.wh_name]
db = rnn.grads[rnn.b_name]
print('dx error: ', rel_error(dx_num, dx))
print('dh0 error: ', rel_error(dh0_num, dh0))
print('dWx error: ', rel_error(dWx_num, dWx))
print('dWh error: ', rel_error(dWh_num, dWh))
print('db error: ', rel_error(db_num, db))
N, T, V, D = 2, 4, 5, 3
we = word_embedding(V, D, name="we")
x = np.asarray([[0, 3, 1, 2], [2, 1, 0, 3]])
W = np.linspace(0, 1, num=V*D).reshape(V, D)
we.params[we.w_name] = W
out = we.forward(x)
expected_out = np.asarray([
[[ 0., 0.07142857, 0.14285714],
[ 0.64285714, 0.71428571, 0.78571429],
[ 0.21428571, 0.28571429, 0.35714286],
[ 0.42857143, 0.5, 0.57142857]],
[[ 0.42857143, 0.5, 0.57142857],
[ 0.21428571, 0.28571429, 0.35714286],
[ 0., 0.07142857, 0.14285714],
[ 0.64285714, 0.71428571, 0.78571429]]])
print('out error: ', rel_error(expected_out, out))
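# Added note (not part of the original cell): the embedding lookup checked
# above is assumed to be a plain row gather from the weight matrix, which
# reproduces expected_out:
out_sketch = W[x]  # shape (N, T, D): row W[i] for each index i in x
print('sketch matches out: ', rel_error(out_sketch, out))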
np.random.seed(599)
N, T, V, D = 50, 3, 5, 6
we = word_embedding(V, D, name="we")
x = np.random.randint(V, size=(N, T))
W = np.random.randn(V, D)
we.params[we.w_name] = W
out = we.forward(x)
dout = np.random.randn(*out.shape)
we.backward(dout)
dW = we.grads[we.w_name]
f = lambda W: we.forward(x)
dW_num = eval_numerical_gradient_array(f, W, dout)
print('dW error: ', rel_error(dW, dW_num))
np.random.seed(599)
# Gradient check for temporal affine layer
N, T, D, M = 2, 3, 4, 5
t_fc = temporal_fc(D, M, init_scale=0.02, name='test_t_fc')
x = np.random.randn(N, T, D)
w = np.random.randn(D, M)
b = np.random.randn(M)
t_fc.params[t_fc.w_name] = w
t_fc.params[t_fc.b_name] = b
out = t_fc.forward(x)
dout = np.random.randn(*out.shape)
dx_num = eval_numerical_gradient_array(lambda x: t_fc.forward(x), x, dout)
dw_num = eval_numerical_gradient_array(lambda w: t_fc.forward(x), w, dout)
db_num = eval_numerical_gradient_array(lambda b: t_fc.forward(x), b, dout)
dx = t_fc.backward(dout)
dw = t_fc.grads[t_fc.w_name]
db = t_fc.grads[t_fc.b_name]
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
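# Added note (not part of the original cell): the temporal affine layer is
# assumed to apply one shared affine transform at every timestep; a minimal
# sketch of the forward pass under that assumption:
out_sketch = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b
print('sketch matches out: ', rel_error(out_sketch, out))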
loss_func = temporal_softmax_loss()
# Sanity check for temporal softmax loss
N, T, V = 100, 1, 10
def check_loss(N, T, V, p):
    x = 0.001 * np.random.randn(N, T, V)
    y = np.random.randint(V, size=(N, T))
    mask = np.random.rand(N, T) <= p
    print(loss_func.forward(x, y, mask))
check_loss(100, 1, 10, 1.0) # Should be about 2.3
check_loss(100, 10, 10, 1.0) # Should be about 23
check_loss(5000, 10, 10, 0.1) # Should be about 2.3
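# Added note: these sanity values follow from the cross-entropy of
# near-uniform scores over V = 10 classes, i.e. ln(10) ~= 2.302 per
# unmasked timestep: T=1 gives ~2.3, T=10 gives ~23, and T=10 with only
# ~10% of positions unmasked gives ~2.3 again.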
# Gradient check for temporal softmax loss
N, T, V = 7, 8, 9
x = np.random.randn(N, T, V)
y = np.random.randint(V, size=(N, T))
mask = (np.random.rand(N, T) > 0.5)
loss = loss_func.forward(x, y, mask)
dx = loss_func.backward()
dx_num = eval_numerical_gradient(lambda x: loss_func.forward(x, y, mask), x, verbose=False)
print('dx error: ', rel_error(dx, dx_num))
N, D, H = 10, 20, 40
V = 4
T = 13
model = TestRNN(D, H, cell_type='rnn')
loss_func = temporal_softmax_loss()
# Set all model parameters to fixed values
for k, v in model.params.items():
    model.params[k] = np.linspace(-1.4, 1.3, num=v.size).reshape(*v.shape)
model.assign_params()
features = np.linspace(-1.5, 0.3, num=(N * D * T)).reshape(N, T, D)
h0 = np.linspace(-1.5, 0.5, num=(N*H)).reshape(N, H)
labels = (np.arange(N * T) % V).reshape(N, T)
pred = model.forward(features, h0)
# You'll need this
mask = np.ones((N, T))
loss = loss_func.forward(pred, labels, mask)
dLoss = loss_func.backward()
expected_loss = 51.0949189134
print('loss: ', loss)
print('expected loss: ', expected_loss)
print('difference: ', abs(loss - expected_loss))
np.random.seed(599)
batch_size = 2
timesteps = 3
input_dim = 4
hidden_dim = 6
label_size = 4
labels = np.random.randint(label_size, size=(batch_size, timesteps))
features = np.random.randn(batch_size, timesteps, input_dim)
h0 = np.random.randn(batch_size, hidden_dim)
model = TestRNN(input_dim, hidden_dim, cell_type='rnn')
loss_func = temporal_softmax_loss()
pred = model.forward(features, h0)
# You'll need this
mask = np.ones((batch_size, timesteps))
loss = loss_func.forward(pred, labels, mask)
dLoss = loss_func.backward()
dout, dh0 = model.backward(dLoss)
grads = model.grads
for param_name in sorted(grads):
    f = lambda _: loss_func.forward(model.forward(features, h0), labels, mask)
    param_grad_num = eval_numerical_gradient(f, model.params[param_name], verbose=False, h=1e-6)
    e = rel_error(param_grad_num, grads[param_name])
    print('%s relative error: %e' % (param_name, e))
N, D, H = 3, 4, 5
lstm = LSTM(D, H, init_scale=0.02, name='test_lstm')
x = np.linspace(-0.4, 1.2, num=N*D).reshape(N, D)
prev_h = np.linspace(-0.3, 0.7, num=N*H).reshape(N, H)
prev_c = np.linspace(-0.4, 0.9, num=N*H).reshape(N, H)
Wx = np.linspace(-2.1, 1.3, num=4*D*H).reshape(D, 4 * H)
Wh = np.linspace(-0.7, 2.2, num=4*H*H).reshape(H, 4 * H)
b = np.linspace(0.3, 0.7, num=4*H)
lstm.params[lstm.wx_name] = Wx
lstm.params[lstm.wh_name] = Wh
lstm.params[lstm.b_name] = b
next_h, next_c, cache = lstm.step_forward(x, prev_h, prev_c)
expected_next_h = np.asarray([
[ 0.24635157, 0.28610883, 0.32240467, 0.35525807, 0.38474904],
[ 0.49223563, 0.55611431, 0.61507696, 0.66844003, 0.7159181 ],
[ 0.56735664, 0.66310127, 0.74419266, 0.80889665, 0.858299 ]])
expected_next_c = np.asarray([
[ 0.32986176, 0.39145139, 0.451556, 0.51014116, 0.56717407],
[ 0.66382255, 0.76674007, 0.87195994, 0.97902709, 1.08751345],
[ 0.74192008, 0.90592151, 1.07717006, 1.25120233, 1.42395676]])
print('next_h error: ', rel_error(expected_next_h, next_h))
print('next_c error: ', rel_error(expected_next_c, next_c))
np.random.seed(599)
N, D, H = 4, 5, 6
lstm = LSTM(D, H, init_scale=0.02, name='test_lstm')
x = np.random.randn(N, D)
prev_h = np.random.randn(N, H)
prev_c = np.random.randn(N, H)
Wx = np.random.randn(D, 4 * H)
Wh = np.random.randn(H, 4 * H)
b = np.random.randn(4 * H)
lstm.params[lstm.wx_name] = Wx
lstm.params[lstm.wh_name] = Wh
lstm.params[lstm.b_name] = b
next_h, next_c, cache = lstm.step_forward(x, prev_h, prev_c)
dnext_h = np.random.randn(*next_h.shape)
dnext_c = np.random.randn(*next_c.shape)
fx_h = lambda x: lstm.step_forward(x, prev_h, prev_c)[0]
fh_h = lambda h: lstm.step_forward(x, prev_h, prev_c)[0]
fc_h = lambda c: lstm.step_forward(x, prev_h, prev_c)[0]
fWx_h = lambda Wx: lstm.step_forward(x, prev_h, prev_c)[0]
fWh_h = lambda Wh: lstm.step_forward(x, prev_h, prev_c)[0]
fb_h = lambda b: lstm.step_forward(x, prev_h, prev_c)[0]
fx_c = lambda x: lstm.step_forward(x, prev_h, prev_c)[1]
fh_c = lambda h: lstm.step_forward(x, prev_h, prev_c)[1]
fc_c = lambda c: lstm.step_forward(x, prev_h, prev_c)[1]
fWx_c = lambda Wx: lstm.step_forward(x, prev_h, prev_c)[1]
fWh_c = lambda Wh: lstm.step_forward(x, prev_h, prev_c)[1]
fb_c = lambda b: lstm.step_forward(x, prev_h, prev_c)[1]
num_grad = eval_numerical_gradient_array
dx_num = num_grad(fx_h, x, dnext_h) + num_grad(fx_c, x, dnext_c)
dh_num = num_grad(fh_h, prev_h, dnext_h) + num_grad(fh_c, prev_h, dnext_c)
dc_num = num_grad(fc_h, prev_c, dnext_h) + num_grad(fc_c, prev_c, dnext_c)
dWx_num = num_grad(fWx_h, Wx, dnext_h) + num_grad(fWx_c, Wx, dnext_c)
dWh_num = num_grad(fWh_h, Wh, dnext_h) + num_grad(fWh_c, Wh, dnext_c)
db_num = num_grad(fb_h, b, dnext_h) + num_grad(fb_c, b, dnext_c)
dx, dh, dc, dWx, dWh, db = lstm.step_backward(dnext_h, dnext_c, cache)
print('dx error: ', rel_error(dx_num, dx))
print('dh error: ', rel_error(dh_num, dh))
print('dc error: ', rel_error(dc_num, dc))
print('dWx error: ', rel_error(dWx_num, dWx))
print('dWh error: ', rel_error(dWh_num, dWh))
print('db error: ', rel_error(db_num, db))
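# Added note (not part of the original cell): the LSTM step checked above is
# assumed to follow the standard gate layout, with input/forget/output/candidate
# pre-activations stacked along the 4H axis. A minimal sketch of the forward step:
def lstm_step_sketch(x, prev_h, prev_c, Wx, Wh, b):
    H = prev_h.shape[1]
    a = x.dot(Wx) + prev_h.dot(Wh) + b    # pre-activations, shape (N, 4H)
    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))
    i = sigmoid(a[:, :H])                 # input gate
    f = sigmoid(a[:, H:2 * H])            # forget gate
    o = sigmoid(a[:, 2 * H:3 * H])        # output gate
    g = np.tanh(a[:, 3 * H:])             # candidate cell update
    next_c_ = f * prev_c + i * g          # gated cell state
    next_h_ = o * np.tanh(next_c_)        # gated hidden state
    return next_h_, next_c_
h_sketch, c_sketch = lstm_step_sketch(x, prev_h, prev_c, Wx, Wh, b)
print('sketch matches next_h: ', rel_error(h_sketch, next_h))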
N, D, H, T = 2, 5, 4, 3
lstm = LSTM(D, H, init_scale=0.02, name='test_lstm')
x = np.linspace(-0.4, 0.6, num=N*T*D).reshape(N, T, D)
h0 = np.linspace(-0.4, 0.8, num=N*H).reshape(N, H)
Wx = np.linspace(-0.2, 0.9, num=4*D*H).reshape(D, 4 * H)
Wh = np.linspace(-0.3, 0.6, num=4*H*H).reshape(H, 4 * H)
b = np.linspace(0.2, 0.7, num=4*H)
lstm.params[lstm.wx_name] = Wx
lstm.params[lstm.wh_name] = Wh
lstm.params[lstm.b_name] = b
h = lstm.forward(x, h0)
expected_h = np.asarray([
[[ 0.01764008, 0.01823233, 0.01882671, 0.0194232 ],
[ 0.11287491, 0.12146228, 0.13018446, 0.13902939],
[ 0.31358768, 0.33338627, 0.35304453, 0.37250975]],
[[ 0.45767879, 0.4761092, 0.4936887, 0.51041945],
[ 0.6704845, 0.69350089, 0.71486014, 0.7346449 ],
[ 0.81733511, 0.83677871, 0.85403753, 0.86935314]]])
print('h error: ', rel_error(expected_h, h))
np.random.seed(599)
N, D, T, H = 2, 3, 10, 6
lstm = LSTM(D, H, init_scale=0.02, name='test_lstm')
x = np.random.randn(N, T, D)
h0 = np.random.randn(N, H)
Wx = np.random.randn(D, 4 * H)
Wh = np.random.randn(H, 4 * H)
b = np.random.randn(4 * H)
lstm.params[lstm.wx_name] = Wx
lstm.params[lstm.wh_name] = Wh
lstm.params[lstm.b_name] = b
out = lstm.forward(x, h0)
dout = np.random.randn(*out.shape)
dx, dh0 = lstm.backward(dout)
dWx = lstm.grads[lstm.wx_name]
dWh = lstm.grads[lstm.wh_name]
db = lstm.grads[lstm.b_name]
dx_num = eval_numerical_gradient_array(lambda x: lstm.forward(x, h0), x, dout)
dh0_num = eval_numerical_gradient_array(lambda h0: lstm.forward(x, h0), h0, dout)
dWx_num = eval_numerical_gradient_array(lambda Wx: lstm.forward(x, h0), Wx, dout)
dWh_num = eval_numerical_gradient_array(lambda Wh: lstm.forward(x, h0), Wh, dout)
db_num = eval_numerical_gradient_array(lambda b: lstm.forward(x, h0), b, dout)
print('dx error: ', rel_error(dx_num, dx))
print('dh0 error: ', rel_error(dh0_num, dh0))
print('dWx error: ', rel_error(dWx_num, dWx))
print('dWh error: ', rel_error(dWh_num, dWh))
print('db error: ', rel_error(db_num, db))
N, D, H = 10, 20, 40
V = 4
T = 13
model = TestRNN(D, H, cell_type='lstm')
loss_func = temporal_softmax_loss()
# Set all model parameters to fixed values
for k, v in model.params.items():
    model.params[k] = np.linspace(-1.4, 1.3, num=v.size).reshape(*v.shape)
model.assign_params()
features = np.linspace(-1.5, 0.3, num=(N * D * T)).reshape(N, T, D)
h0 = np.linspace(-1.5, 0.5, num=(N*H)).reshape(N, H)
labels = (np.arange(N * T) % V).reshape(N, T)
pred = model.forward(features, h0)
# You'll need this
mask = np.ones((N, T))
loss = loss_func.forward(pred, labels, mask)
dLoss = loss_func.backward()
expected_loss = 49.2140256354
print('loss: ', loss)
print('expected loss: ', expected_loss)
print('difference: ', abs(loss - expected_loss))
from ipywidgets import widgets, interact
from IPython.display import display
input_text = widgets.Text()
input_text.value = "Paste your own text words here and hit Enter."
def f(x):
    print('set!!')
    print(x.value)
input_text.on_submit(f)
input_text
# copy paste your text source in the box below and hit enter.
# If you don't have any preference,
# you can copy paste the lyrics from here https://www.azlyrics.com/lyrics/ylvis/thefox.html
import re
text = re.split(' |\n',input_text.value.lower()) # all words are converted into lower case
outputSize = len(text)
word_list = list(set(text))
dataSize = len(word_list)
output = np.zeros(outputSize)
for i in range(0, outputSize):
    index = np.where(np.asarray(word_list) == text[i])
    output[i] = index[0]
data_labels = output.astype(int)  # np.int was removed in recent NumPy; built-in int is equivalent
gt_labels = data_labels[1:]
data_labels = data_labels[:-1]
print('Input text size: %s' % outputSize)
print('Input word number: %s' % dataSize)
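# Added note: the index lookup above scans word_list once per token; for
# large inputs, a dict built once is equivalent and much faster, e.g.:
# word_to_idx = {w: i for i, w in enumerate(word_list)}
# output = np.array([word_to_idx[w] for w in text], dtype=int)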
# you can change the following parameters.
D = 10 # input dimension
H = 20 # hidden space dimension
T = 50 # timesteps
N = 10 # batch size
max_epoch = 100 # max epoch size
loss_func = temporal_softmax_loss()
# you can change the cell_type between 'rnn' and 'lstm'.
model = LanguageModelRNN(dataSize, D, H, cell_type='rnn')
optimizer = Adam(model, 5e-4)
data = { 'data_train': data_labels, 'labels_train': gt_labels }
results = train_net(data, model, loss_func, optimizer, timesteps=T, batch_size=N, max_epochs=max_epoch, verbose=True)
opt_params, loss_hist, train_acc_hist = results
# Plot the learning curves
plt.subplot(2, 1, 1)
plt.title('Training loss')
loss_hist_ = loss_hist[1::100] # thin out the curve a bit
plt.plot(loss_hist_, '-o')
plt.xlabel('Iteration')
plt.subplot(2, 1, 2)
plt.title('Accuracy')
plt.plot(train_acc_hist, '-o', label='Training')
plt.xlabel('Epoch')
plt.legend(loc='lower right')
plt.gcf().set_size_inches(15, 12)
plt.show()
# you can change the generated text length below.
text_length = 100
idx = 0
# you can also start from a specific word.
# since the words are all converted into lower case, make sure you use lower case below.
idx = int(np.where(np.asarray(word_list) == 'dog')[0])
# sample from the trained model
words = model.sample(idx, text_length-1)
# convert indices into words
output = [ word_list[i] for i in words]
print(' '.join(output))
# you can change the following parameters.
D = 10 # input dimension
H = 20 # hidden space dimension
T = 50 # timesteps
N = 10 # batch size
max_epoch = 100 # max epoch size
loss_func = temporal_softmax_loss()
# you can change the cell_type between 'rnn' and 'lstm'.
model = LanguageModelRNN(dataSize, D, H, cell_type='rnn')
optimizer = Adam(model, 5e-4)
data = { 'data_train': data_labels, 'labels_train': gt_labels }
results = train_net(data, model, loss_func, optimizer, timesteps=T, batch_size=N, max_epochs=max_epoch, verbose=True)
text_length = 100
idx = 0
# you can also start from a specific word.
# since the words are all converted into lower case, make sure you use lower case below.
idx = int(np.where(np.asarray(word_list) == 'dog')[0])
# sample from the trained model
words = model.sample(idx, text_length-1)
# convert indices into words
output = [ word_list[i] for i in words]
print(' '.join(output))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What do we see?
Step2: The <code>**</code> operator can be used to calculate powers
Step3: Strings
Step4: Print a string using the <code>print</code> statement.
Step5: String literals can span multiple lines using triple-quotes
Step7: Concatenate strings using <code>+</code>
Step8: Access a character in a string using an index; the first index is 0
Step9: Slicing is also supported.
Step10: Python strings cannot be changed - they are immutable
Step11: But if you need a different string, you can create a new one
Step12: The <code>len()</code> function can be used to calculate the length of a string
Step13: Lists
Step14: Like strings (and all other built-in sequence types), lists can be indexed and sliced
Step15: However, unlike strings, lists are a mutable type. It is possible to change their content
Step16: Concatenate lists using the <code>+</code> operator
Step17: Assignment to slices is also possible
Step18: Unpacking
Step19: The number of variables and the length of the sequence must match
|
<ASSISTANT_TASK:>
Python Code:
for number in [0, 1, 2, 3, 4, 5, 6]:
    if number % 2 == 0:
        print "Even number:", number
(50 - 5.0 * 6) / 4
5 ** 2
"I can eat glass it doesn't hurt me"
'I can eat glass it doesn\'t hurt me'
print 'I can eat glass.\nIt doesn\'t hurt me'
print '---------------'
print r'I can eat glass.\n It doesn\'t hurt me'
print '''I can eat glass
It doesn't hurt me'''
print 'AA' + 'BB'
print 'AA' * 3
word = 'python'
print word[0]
print word[-1]
print word[-2]
# Get characters from position 1 to position 4 (4 characters)
print word[1:5] # exclude 5
# Get characters from position 1 to the end
print word[1:]
# Get all characters except the final character
print word[:-1]
# Get 3 last characters
print word[-3:]
# We get an error when trying to change it
word[0] = 'J'
print 'J' + word[1:]
print len(word)
squares = [1, 4, 9, 16, 25]
print squares
print squares[0] # Indexing returns the item
print squares[-1]
print squares[-3:] # Slicing returns a new list
cubes = [1, 8, 27, 65, 125] # something's wrong here
print cubes
cubes[3] = 64
print cubes
print squares + [36, 49]
squares.append(36)
print squares
print len(squares)
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
print letters
letters[2:5] = ['C', 'D', 'E']
print letters
letters[2:5] = ['-']
print letters
a, b, c = [1, 2, 3]
print a, b, c
a, b, c = [1, 2]
a, b = 1, 2
a, b = b, a
print a, b
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading and exploring the data
Step2: We see that the data are not in CSV format; the fields are separated by whitespace. If we try to load them with pandas as-is we won't get very far
Step3: We need to make the following changes
Step4: Dates can also be parsed manually with the argument
Step5: A dataframe can hold data of different types in different columns
Step6: Once we have loaded the data, we are ready to analyze it with pandas' full arsenal. For example, we may want a quick statistical description
Step7: Accessing the data
Step8: Just as we can access the data, we can operate on it
Step9: and pass it into functions
Step10: Rows
Step11: I can even take slices based on dates
Step12: Filtering the data
Step13: "Rolling" functions
Step14: Creating new columns
Step15: Creating groupings
Step16: With these groups we can do several things
Step17: Creating groupings
Step18: The previous line is not simple, and you won't write it correctly on the first try (especially when you are starting out). It is an example of pandas being a powerful library that takes time to learn. You will spend many hours fighting problems of this kind, but fortunately many people have struggled before you, and their experience is captured in hundreds of Stack Overflow questions and in the pandas documentation
Step19: Box plots
Step20: Plotting the data for a "typical day d of month m of year a"
Step21: Special visualizations
|
<ASSISTANT_TASK:>
Python Code:
# Import pandas
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import HTML
HTML('<iframe src="http://www.juntadeandalucia.es/agriculturaypesca/ifapa/ria/servlet/FrontController?action=Static&url=coordenadas.jsp&c_provincia=4&c_estacion=4" width="700" height="400"></iframe>')
# Let's see what the file looks like
# (this is a shell command, not Python,
# and will only work on Linux or macOS)
!head ../data/tabernas_meteo_data.txt
# Try to load it with pandas
pd.read_csv("../data/tabernas_meteo_data.txt").head(5)
data = pd.read_csv(
    "../data/tabernas_meteo_data.txt",
    delim_whitespace=True,  # whitespace-delimited
    usecols=(0, 2, 3, 4, 5),  # columns we want to use
    skiprows=2,  # skip the first two lines
    names=['DATE', 'TMAX', 'TMIN', 'TMED', 'PRECIP'],
    parse_dates=['DATE'],
    # date_parser=lambda x: pd.datetime.strptime(x, '%d-%m-%y'),  # Manual parsing
    dayfirst=True,  # Important!
    index_col=["DATE"]  # If we want to index by dates
)
# Sort from oldest to newest
data.sort_index(inplace=True)
# Show only the first or last rows
data.head()
# Check the data types of the columns
data.dtypes
# Ask for general information about the dataset
data.info()
data.index.dayofweek
# Statistical description
data.describe()
# Accessing a column as a key
data['TMAX'].head()
# Accessing a column as an attribute
data.TMIN.head()
# Accessing several columns at once
data[['TMAX', 'TMIN']].head()
# Modifying column values
data[['TMAX', 'TMIN']] / 10
# Applying a function to an entire column (e.g. the numpy mean)
import numpy as np
np.mean(data.TMAX)
# Computing the mean with pandas
data.TMAX.mean()
# Accessing a row by position
data.iloc[1]
# Accessing a row by label
data.loc["2016-09-02"]
data.loc["2016-12-01":]
# Checking which records lack valid data
data.TMIN.isnull().head()
# Accessing the records that satisfy a condition
data.loc[data.TMIN.isnull()]
# Precipitation values above the mean:
print(data.PRECIP.mean())
data[data.PRECIP > data.PRECIP.mean()]
# The raw TMAX values, for comparison
data.TMAX.head(15)
# Centered rolling mean (window of 5)
data.TMAX.rolling(5, center=True).mean().head(15)
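# Added note: by default rolling(5) returns NaN until a full window of 5
# values is available; min_periods relaxes this if desired, e.g.:
# data.TMAX.rolling(5, center=True, min_periods=1).mean().head(15)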
# We will group by year and month: let's create two new columns
data['year'] = data.index.year
data['month'] = data.index.month
# Create the grouping
monthly = data.groupby(by=['year', 'month'])
# We can inspect the groups that were created
monthly.groups.keys()
# Access one group
monthly.get_group((2016, 3)).head()
# Aggregate the data:
monthly_mean = monthly.mean()
monthly_mean.head(24)
# Keep the years as the index and show the monthly mean in each column
monthly_mean.reset_index().pivot(index='year', columns='month')
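# Added note: because (year, month) is already the index of monthly_mean,
# unstack gives an equivalent table and is often easier to remember:
# monthly_mean.unstack('month')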
# Plot the max, min and mean temperature
data.plot(y=["TMAX", "TMIN", "TMED"])
plt.title('Temperatures')
data.loc[:, 'TMAX':'PRECIP'].plot.box()
group_daily = data.groupby(['month', data.index.day])
daily_agg = group_daily.agg({'TMED': 'mean', 'TMAX': 'max', 'TMIN': 'min', 'PRECIP': 'mean'})
daily_agg.head()
daily_agg.plot(y=['TMED', 'TMAX', 'TMIN'])
# scatter_matrix
from pandas.plotting import scatter_matrix  # pandas.tools.plotting was removed in newer pandas
axes = scatter_matrix(data.loc[:, "TMAX":"TMED"])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd  # needed for pd.DataFrame below
from sklearn.preprocessing import StandardScaler
#Initialise standard scaler
scaler = StandardScaler()
#Fit the scaler using X_train data
scaler.fit(X_train)
#Transform X_train and X_test using the scaler and convert back to DataFrame
X_train = pd.DataFrame(scaler.transform(X_train), columns = X_train.columns)
X_test = pd.DataFrame(scaler.transform(X_test), columns = X_test.columns)
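#Note (added): fitting the scaler only on X_train avoids test-set leakage;
#the training mean/std are applied unchanged to X_test. If the original scale
#is ever needed, scaler.inverse_transform can recover it, e.g.:
#X_train_raw = pd.DataFrame(scaler.inverse_transform(X_train), columns = X_train.columns)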
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Fluorinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mpi-m', 'sandbox-3', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
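# Illustrative only (added): a filled-in cell would look like the line below,
# with text supplied by the model developers -- the value here is a made-up
# placeholder, not MPI-M's actual description:
# DOC.set_value("Top-level overview of the atmosphere component")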
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From a sample of the RMS Titanic data, we can see the various features present for each passenger on the ship
Step3: The very same sample of the RMS Titanic data now shows the Survived feature removed from the DataFrame. Note that data (the passenger data) and outcomes (the outcomes of survival) are now paired. That means for any passenger data.loc[i], they have the survival outcome outcome[i].
Step5: Tip
Step6: Question 1
Step7: Answer
Step9: Examining the survival statistics, a large majority of males did not survive the ship sinking. However, a majority of females did survive the ship sinking. Let's build on our previous prediction
Step10: Question 2
Step11: Answer
Step13: Examining the survival statistics, the majority of males younger than 10 survived the ship sinking, whereas most males age 10 or older did not survive the ship sinking. Let's continue to build on our previous prediction
Step14: Question 3
Step15: Answer
Step17: After exploring the survival statistics visualization, fill in the missing code below so that the function will make your prediction.
Step18: Question 4
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
# RMS Titanic data visualization code
from titanic_visualizations import survival_stats
from IPython.display import display
%matplotlib inline
# Load the dataset
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# Print the first few entries of the RMS Titanic data
display(full_data.head())
# Store the 'Survived' feature in a new variable and remove it from the dataset
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis = 1)
# Show the new dataset with 'Survived' removed
display(data.head())
def accuracy_score(truth, pred):
"""Returns accuracy score for input truth and predictions."""
# Ensure that the number of predictions matches number of outcomes
if len(truth) == len(pred):
# Calculate and return the accuracy as a percent
return "Predictions have an accuracy of {:.2f}%.".format((truth == pred).mean()*100)
else:
return "Number of predictions does not match number of outcomes!"
# Test the 'accuracy_score' function
predictions = pd.Series(np.ones(5, dtype = int))
print accuracy_score(outcomes[:5], predictions)
def predictions_0(data):
"""Model with no features. Always predicts a passenger did not survive."""
predictions = []
for _, passenger in data.iterrows():
# Predict the survival of 'passenger'
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_0(data)
print accuracy_score(outcomes, predictions)
survival_stats(data, outcomes, 'Sex')
def predictions_1(data):
"""Model with one feature:
- Predict a passenger survived if they are female."""
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
predictions.append(passenger["Sex"] == "female")
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_1(data)
print accuracy_score(outcomes, predictions)
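# Aside -- a minimal vectorized sketch of the same gender-based model,
# reusing the 'data' and 'outcomes' objects defined above.
pred_vectorized = (data['Sex'] == 'female').astype(int)
print accuracy_score(outcomes, pred_vectorized)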
survival_stats(data, outcomes, 'Age', ["Sex == 'male'"])
def predictions_2(data):
"""Model with two features:
- Predict a passenger survived if they are female.
- Predict a passenger survived if they are male and younger than 10."""
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
survival_status = False
if passenger["Sex"] == "female":
survival_status = True
elif passenger["Age"] < 10:
survival_status = True
predictions.append(survival_status)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_2(data)
print accuracy_score(outcomes, predictions)
survival_stats(data, outcomes, "Age", ["Sex == 'female'", "Pclass == 3"])
def predictions_3(data):
"""Model with multiple features. Makes a prediction with an accuracy of at least 80%."""
male = 0
male_incorrect = 0
female = 0
female_incorrect = 0
predictions = []
i = 0
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
survival_status = False
# Female
if passenger["Sex"] == "female":
# First or second class
if passenger["Pclass"] == 1 or passenger["Pclass"] == 2:
survival_status = True
# Third class
else:
# Passenger did not embark in Southampton
if passenger["Embarked"] != "S":
survival_status = True
# Accumualte mis-predictions
female += 1
if survival_status != outcomes[i]:
female_incorrect += 1
# Male
else:
# Younger than 10
if passenger["Age"] < 10:
if passenger["SibSp"] <= 2:
survival_status = True
# Accumulate mis-predictions
male += 1
if survival_status != outcomes[i]:
male_incorrect += 1
predictions.append(survival_status)
i += 1
# Print accumulated mis-prediction rates
print("Mis-prediction rates:")
print("Male: {}/{} = {}".format(male_incorrect, male, 1.0 * male_incorrect/male))
print("Female: {}/{} = {}".format(female_incorrect, female, 1.0 * female_incorrect/female))
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_3(data)
print accuracy_score(outcomes, predictions)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Usually, this kind of data is used to create scoring models. With the tools already studied, we could achieve this task easily. However, this time we would like to know which variables are important for explaining the Balance account of a given client. In other words, we would like to know whether there is a statistical relation between Balance and the other variables. For now, take Gender to test this hypothesis.
Step2: It seems that the Balance distribution doesn't change across Gender. But what if we calculate the mean value of Balance for Male and Female?
Step3: So, are we done? Is this difference between Male and Female Balance enough to answer the initial question?
Step4: First calculate the statistics (mean difference) in the data.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Credit.csv', index_col=0)
data.head(10)
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.pairplot(data,hue='Gender',palette="husl",markers="+")
# splitting data
male_= data[data.Gender==' Male'].Balance
female_ = data[data.Gender=='Female'].Balance
fig = plt.figure(figsize=(14,7))
n, bins, patches = plt.hist(male_, bins =50, facecolor='blue', alpha=0.5,label='Male')
n, bins, patches = plt.hist(female_, bins =50,facecolor='red', alpha=0.5,label='Female')
plt.axvline(male_.mean(),linestyle='--',color='blue',)
plt.axvline(female_.mean(),linestyle='--',color='red',)
plt.xlabel('Balance')
plt.legend();
Gender_differences = data.groupby('Gender').mean()
Gender_differences
print('The mean difference in Balance by Gender is : '+ str(Gender_differences.loc[' Male','Balance']-Gender_differences.loc['Female','Balance']))
# Building features and target variable
X = data.Gender.map({' Male': 1, 'Female':0})
Y = data.Balance
original_difference = female_.mean() - male_.mean()
print('The difference in Balance by Gender (in the data) is: '+ str(original_difference))
# Create a Data Frame with desiered variables
dataframe = pd.DataFrame(X)
dataframe['Balance'] = Y
dataframe.head()
# Step 1 & 2
def shuffle_data(frame):
vec = np.zeros(frame.Gender.count())#.astype(float)
vec[np.random.choice(frame.Gender.count(),int(sum(frame.Gender)),replace=False)] = 1
frame['Gender'] = vec
return frame
# Step 3
def mean_difference(frame):
return frame.groupby('Gender').mean().loc[0,'Balance'] - frame.groupby('Gender').mean().loc[1,'Balance']
import numpy as np
def simulate_distribution(frame, N=100):
a = []
for i in range(N):
a.append(mean_difference(shuffle_data(frame)))  # fixed: use the 'frame' argument, not the global 'dataframe'
return a
def plot_distribution(dist, data, color='blue', bins=100, orig=True):  # fixed: the default previously captured a fragile global 'bins'
fig = plt.figure(figsize=(10,6))
n, bins, patches = plt.hist(dist, bins = bins, normed=1.0, facecolor=color, alpha=0.5)
values, base = np.histogram(dist, bins = bins)
if orig:
plt.axvline(np.mean(data), color=color, linestyle='dashed', linewidth=2,label='Original data')
plt.legend()
plt.title('Mean difference')
## Simulation
N = 1000
distribution = simulate_distribution(dataframe,N)
plot_distribution(distribution,original_difference,'blue',100)
# Calculating P-Value
def pvalue(dist,estimation):
return float(sum(np.array(dist)>estimation))/len(dist)
p_value = pvalue(distribution,original_difference)
p_value
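# Illustrative follow-up (a sketch reusing 'distribution' and
# 'original_difference' from above): the two-sided permutation p-value
# counts shuffled differences at least as extreme in either direction.
p_value_two_sided = float(sum(np.abs(np.array(distribution)) >= abs(original_difference))) / len(distribution)
print('Two-sided p-value: ' + str(p_value_two_sided))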
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Artists
Step2: Containers are objects like Figure and Axes. Containers are given primitives to draw. The plotting functions we discussed back in Parts 1 & 2 are convenience functions that generate these primitives and places them into the appropriate containers. In fact, most of those functions will return artist objects (or a list of artist objects) as well as store them into the appropriate axes container.
Step3: To see what properties are set for an artist, use getp()
Step4: Collections
Step5: There are other kinds of collections that are not just simply a list of primitives, but are Artists in their own right. These special kinds of collections take advantage of various optimizations that can be assumed when rendering similar or identical things. You actually do use these collections all the time whether you realize it or not. Markers are (indirectly) implemented this way (so, whenever you do plot() or scatter(), for example).
Step6: Exercise 5.1
|
<ASSISTANT_TASK:>
Python Code:
"""
Show examples of matplotlib artists
http://matplotlib.org/api/artist_api.html
Several examples of standard matplotlib graphics primitives (artists)
are drawn using matplotlib API. Full list of artists and the
documentation is available at
http://matplotlib.org/api/artist_api.html
Copyright (c) 2010, Bartosz Telenczuk
License: This work is licensed under the BSD. A copy should be
included with this source code, and is also available at
http://www.opensource.org/licenses/bsd-license.php
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
fig, ax = plt.subplots(1, 1, figsize=(7,7))
# create 3x3 grid to plot the artists
pos = np.mgrid[0.2:0.8:3j, 0.2:0.8:3j].reshape(2, -1)
patches = []
# add a circle
art = mpatches.Circle(pos[:, 0], 0.1, ec="none")
patches.append(art)
plt.text(pos[0, 0], pos[1, 0] - 0.15, "Circle", ha="center", size=14)
# add a rectangle
art = mpatches.Rectangle(pos[:, 1] - [0.025, 0.05], 0.05, 0.1, ec="none")
patches.append(art)
plt.text(pos[0, 1], pos[1, 1] - 0.15, "Rectangle", ha="center", size=14)
# add a wedge
wedge = mpatches.Wedge(pos[:, 2], 0.1, 30, 270, ec="none")
patches.append(wedge)
plt.text(pos[0, 2], pos[1, 2] - 0.15, "Wedge", ha="center", size=14)
# add a Polygon
polygon = mpatches.RegularPolygon(pos[:, 3], 5, 0.1)
patches.append(polygon)
plt.text(pos[0, 3], pos[1, 3] - 0.15, "Polygon", ha="center", size=14)
#add an ellipse
ellipse = mpatches.Ellipse(pos[:, 4], 0.2, 0.1)
patches.append(ellipse)
plt.text(pos[0, 4], pos[1, 4] - 0.15, "Ellipse", ha="center", size=14)
#add an arrow
arrow = mpatches.Arrow(pos[0, 5] - 0.05, pos[1, 5] - 0.05, 0.1, 0.1, width=0.1)
patches.append(arrow)
plt.text(pos[0, 5], pos[1, 5] - 0.15, "Arrow", ha="center", size=14)
# add a path patch
Path = mpath.Path
verts = np.array([
(0.158, -0.257),
(0.035, -0.11),
(-0.175, 0.20),
(0.0375, 0.20),
(0.085, 0.115),
(0.22, 0.32),
(0.3, 0.005),
(0.20, -0.05),
(0.158, -0.257),
])
verts = verts - verts.mean(0)
codes = [Path.MOVETO,
Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.LINETO,
Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CLOSEPOLY]
path = mpath.Path(verts / 2.5 + pos[:, 6], codes)
patch = mpatches.PathPatch(path)
patches.append(patch)
plt.text(pos[0, 6], pos[1, 6] - 0.15, "PathPatch", ha="center", size=14)
# add a fancy box
fancybox = mpatches.FancyBboxPatch(
pos[:, 7] - [0.025, 0.05], 0.05, 0.1,
boxstyle=mpatches.BoxStyle("Round", pad=0.02))
patches.append(fancybox)
plt.text(pos[0, 7], pos[1, 7] - 0.15, "FancyBoxPatch", ha="center", size=14)
# add a line
x,y = np.array([[-0.06, 0.0, 0.1], [0.05,-0.05, 0.05]])
line = mlines.Line2D(x+pos[0, 8], y+pos[1, 8], lw=5.)
plt.text(pos[0, 8], pos[1, 8] - 0.15, "Line2D", ha="center", size=14)
collection = PatchCollection(patches)
ax.add_collection(collection)
ax.add_line(line)
ax.set_axis_off()
plt.show()
fig, ax = plt.subplots(1, 1)
lines = plt.plot([1, 2, 3, 4], [1, 2, 3, 4], 'b', [1, 2, 3, 4], [4, 3, 2, 1], 'r')
lines[0].set(linewidth=5)
lines[1].set(linewidth=10, alpha=0.7)
plt.show()
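# Small illustrative sketch (not in the original notebook): plt.setp() sets
# the same properties on every artist in a list at once.
fig, ax = plt.subplots(1, 1)
lines = ax.plot([1, 2, 3, 4], [1, 4, 2, 3], [1, 2, 3, 4], [3, 2, 4, 1])
plt.setp(lines, linewidth=3, alpha=0.6)
plt.show()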
fig = plt.figure()
plt.getp(fig.patch)  # getp() without a property name prints all settable properties itself
from matplotlib.collections import LineCollection
fig, ax = plt.subplots(1, 1)
lc = LineCollection([[(4, 10), (16, 10)],
[(2, 2), (10, 15), (6, 7)],
[(14, 3), (1, 1), (3, 5)]])
lc.set_color('r')
lc.set_linewidth(5)
ax.add_collection(lc)
ax.set_xlim(0, 18)
ax.set_ylim(0, 18)
plt.show()
# Now show how to set individual properties in a collection
fig, ax = plt.subplots(1, 1)
lc = LineCollection([[(4, 10), (16, 10)],
[(2, 2), (10, 15), (6, 7)],
[(14, 3), (1, 1), (3, 5)]])
lc.set_color(['r', 'blue', (0.2, 0.9, 0.3)])
lc.set_linewidth([4, 3, 6])
ax.add_collection(lc)
ax.set_xlim(0, 18)
ax.set_ylim(0, 18)
plt.show()
from matplotlib.collections import RegularPolyCollection
fig, ax = plt.subplots(1, 1)
offsets = np.random.rand(20, 2)
collection = RegularPolyCollection(
numsides=5, # a pentagon
sizes=(150,),
offsets=offsets,
transOffset=ax.transData,
)
ax.add_collection(collection)
plt.show()
%load exercises/5.1-goldstar.py
from matplotlib.collections import StarPolygonCollection
fig, ax = plt.subplots(1, 1)
collection = StarPolygonCollection(5,
offsets=[(0.5, 0.5)],
transOffset=ax.transData)
ax.add_collection(collection)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The above graph shows that the ratio between consecutive Fibonacci numbers is close to the miles-to-kilometres conversion factor (~1.6), popularly referred to as the Golden Ratio.
|
<ASSISTANT_TASK:>
Python Code:
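import matplotlib.pyplot as plt

# fibo() is called below but was not captured in this notebook; this is a
# minimal sketch of it, assuming the series starts at 1, 1.
def fibo(n):
series = [1, 1]
while len(series) < n:
series.append(series[-1] + series[-2])
return series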
def plot_fibo_ratio(series):
ratios = []
for i in range(len(series)-1):
ratios.append(float(series[i+1]) / series[i])  # float() avoids integer division under Python 2
plt.plot(ratios, 'b*')
plt.ylabel('Ratio')
plt.xlabel('No.')
def plot_km_miles_ratio(kms):
miles_km = [1.6094*km/km for km in kms]
plt.plot(miles_km, 'ro')
num = 100
series = fibo(num)
plot_fibo_ratio(series)
plot_km_miles_ratio(series)
plt.title('Ratio between Fibonacci numbers & Golden ratio')
plt.legend(['Fibonacci Numbers', 'Miles/Km'])
plt.show()
def estimate_kms(miles):
approx_kms = []
exact_kms = [1.6094*m for m in miles[1:]]
for i in range(len(miles)-1):
approx_kms.append(miles[i]+miles[i+1])  # fixed: use the 'miles' argument rather than the global 'series'
plt.figure(2)
plt.plot(approx_kms, exact_kms, 'ro')
plt.title('Approximating kilometers using fibonacci')
series = fibo(num)
estimate_kms(series)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import section specific modules
Step2: 4.5.1 UV coverage
Step3: Let's express the corresponding physical baseline in ENU coordinates.
Step4: Let's place the interferometer at a latitude $L_a=+45^\circ00'00''$.
Step5: Figure 4.5.1
Step6: 4.5.1.1.3 Computing the projected baselines in ($u$,$v$,$w$) coordinates as a function of time
Step7: As the $u$, $v$, $w$ coordinates explicitly depend on $H$, we must evaluate them for each observational time step. We will use the equations defined in $\S$ 4.2.2.
Step8: We now have everything that describes the $uvw$-track of the baseline (over an 8-hour observational period). It is hard to predict which locus the $uvw$ track traverses given only the three mathematical equations from above. Let's plot it in $uvw$ space and its projection in $uv$ space.
Step9: Figure 4.5.2
Step10: Figure 4.5.3
Step11: Let's compute the $uv$ tracks of an observation of the NCP ($\delta=90^\circ$)
Step12: Let's compute the uv tracks when observing a source at $\delta=30^\circ$
Step13: Figure 4.5.4
Step14: Figure 4.5.5
Step15:
Step16: We then convert the ($\alpha$,$\delta$) to $l,m$
Step17: The source and phase centre coordinates are now given in degrees.
Step18: Figure 4.5.6
Step19: We create the dimensions of our visibility plane.
Step20: We create our fully-filled visibility plane. With a "perfect" interferometer, we could sample the entire $uv$-plane. Since we only have a finite number of antennas, this is never possible in practice. Recall that our sky brightness $I(l,m)$ is related to our visibilities $V(u,v)$ via the Fourier transform. For a bunch of point sources we can therefore write $V(u,v)=\sum_k A_k e^{-2\pi i(u l_k + v m_k)}$, where $A_k$ is the flux and $(l_k,m_k)$ the direction cosines of the $k$-th source (this is exactly the sum the code below evaluates).
Step21: Below we sample our visibility plane on the $uv$-track derived in the first section, i.e. $V(u_t,v_t)$.
Step22: Figure 4.5.7
Step23: Figure 4.5.8
Step24: Figure 4.5.9
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
from mpl_toolkits.mplot3d import Axes3D
import plotBL
HTML('../style/code_toggle.html')
ant1 = np.array([-500e3,500e3,0]) # in m
ant2 = np.array([500e3,-500e3,+10]) # in m
b_ENU = ant2-ant1 # baseline
D = np.sqrt(np.sum((b_ENU)**2)) # |b|
print(str(D/1000)+" km")
L = (np.pi/180)*(45+0./60+0./3600) # Latitude in radians
A = np.arctan2(b_ENU[0],b_ENU[1])
print("Baseline Azimuth="+str(np.degrees(A))+"°")
E = np.arcsin(b_ENU[2]/D)
print("Baseline Elevation="+str(np.degrees(E))+"°")
%matplotlib nbagg
plotBL.sphere(ant1,ant2,A,E,D,L)
# Observation parameters
c = 3e8 # Speed of light
f = 1420e9 # Frequency
lam = c/f # Wavelength
dec = (np.pi/180)*(-30-43.0/60-17.34/3600) # Declination
time_steps = 600 # Time Steps
h = np.linspace(-4,4,num=time_steps)*np.pi/12 # Hour angle window
ant1 = np.array([25.095,-9.095,0.045])
ant2 = np.array([90.284,26.380,-0.226])
b_ENU = ant2-ant1
D = np.sqrt(np.sum((b_ENU)**2))
L = (np.pi/180)*(-30-43.0/60-17.34/3600)
A=np.arctan2(b_ENU[0],b_ENU[1])
print("Azimuth=",A*(180/np.pi))
E=np.arcsin(b_ENU[2]/D)
print("Elevation=",E*(180/np.pi))
X = D*(np.cos(L)*np.sin(E)-np.sin(L)*np.cos(E)*np.cos(A))
Y = D*np.cos(E)*np.sin(A)
Z = D*(np.sin(L)*np.sin(E)+np.cos(L)*np.cos(E)*np.cos(A))
u = lam**(-1)*(np.sin(h)*X+np.cos(h)*Y)/1e3
v = lam**(-1)*(-np.sin(dec)*np.cos(h)*X+np.sin(dec)*np.sin(h)*Y+np.cos(dec)*Z)/1e3
w = lam**(-1)*(np.cos(dec)*np.cos(h)*X-np.cos(dec)*np.sin(h)*Y+np.sin(dec)*Z)/1e3
%matplotlib nbagg
plotBL.UV(u,v,w)
%matplotlib inline
from matplotlib.patches import Ellipse
# parameters of the UVtrack as an ellipse
a=np.sqrt(X**2+Y**2)/lam/1e3 # major axis
b=a*np.sin(dec) # minor axis
v0=Z/lam*np.cos(dec)/1e3 # center of ellipse
plotBL.UVellipse(u,v,w,a,b,v0)
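# Quick numerical check (an added sketch): with a, b and v0 as defined
# above, the uv-track should satisfy the ellipse relation
# (u/a)**2 + ((v - v0)/b)**2 = 1 at every time step.
residual = (u/a)**2 + ((v - v0)/b)**2
print("uv-track ellipse relation, min/max:", residual.min(), residual.max())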
L=np.radians(90.)
ant1 = np.array([25.095,-9.095,0.045])
ant2 = np.array([90.284,26.380,-0.226])
b_ENU = ant2-ant1
D = np.sqrt(np.sum((b_ENU)**2))
A=np.arctan2(b_ENU[0],b_ENU[1])
print("Azimuth=",A*(180/np.pi))
E=np.arcsin(b_ENU[2]/D)
print("Elevation=",E*(180/np.pi))
X = D*(np.cos(L)*np.sin(E)-np.sin(L)*np.cos(E)*np.cos(A))
Y = D*np.cos(E)*np.sin(A)
Z = D*(np.sin(L)*np.sin(E)+np.cos(L)*np.cos(E)*np.cos(A))
dec=np.radians(90.)
uNCP = lam**(-1)*(np.sin(h)*X+np.cos(h)*Y)/1e3
vNCP = lam**(-1)*(-np.sin(dec)*np.cos(h)*X+np.sin(dec)*np.sin(h)*Y+np.cos(dec)*Z)/1e3
wNCP = lam**(-1)*(np.cos(dec)*np.cos(h)*X-np.cos(dec)*np.sin(h)*Y+np.sin(dec)*Z)/1e3
# parameters of the UVtrack as an ellipse
aNCP=np.sqrt(X**2+Y**2)/lam/1e3 # major axis
bNCP=aNCP*np.sin(dec) # minor axis
v0NCP=Z/lam*np.cos(dec)/1e3 # center of ellipse
dec=np.radians(30.)
u30 = lam**(-1)*(np.sin(h)*X+np.cos(h)*Y)/1e3
v30 = lam**(-1)*(-np.sin(dec)*np.cos(h)*X+np.sin(dec)*np.sin(h)*Y+np.cos(dec)*Z)/1e3
w30 = lam**(-1)*(np.cos(dec)*np.cos(h)*X-np.cos(dec)*np.sin(h)*Y+np.sin(dec)*Z)/1e3
a30=np.sqrt(X**2+Y**2)/lam/1e3 # major axis
b30=a30*np.sin(dec) # minor axis (fixed: use a30, not the earlier 'a')
v030=Z/lam*np.cos(dec)/1e3 # center of ellipse
%matplotlib inline
plotBL.UVellipse(u30,v30,w30,a30,b30,v030)
plotBL.UVellipse(uNCP,vNCP,wNCP,aNCP,bNCP,v0NCP)
L=np.radians(90.)
X = D*(np.cos(L)*np.sin(E)-np.sin(L)*np.cos(E)*np.cos(A))
Y = D*np.cos(E)*np.sin(A)
Z = D*(np.sin(L)*np.sin(E)+np.cos(L)*np.cos(E)*np.cos(A))
# At local zenith == Celestial Equator
dec=np.radians(0.)
uEQ = lam**(-1)*(np.sin(h)*X+np.cos(h)*Y)/1e3
vEQ = lam**(-1)*(-np.sin(dec)*np.cos(h)*X+np.sin(dec)*np.sin(h)*Y+np.cos(dec)*Z)/1e3
wEQ = lam**(-1)*(np.cos(dec)*np.cos(h)*X-np.cos(dec)*np.sin(h)*Y+np.sin(dec)*Z)/1e3
# parameters of the UVtrack as an ellipse
aEQ=np.sqrt(X**2+Y**2)/lam/1e3 # major axis
bEQ=aEQ*np.sin(dec) # minor axis
v0EQ=Z/lam*np.cos(dec)/1e3 # center of ellipse
# Close to Zenith
dec=np.radians(10.)
u10 = lam**(-1)*(np.sin(h)*X+np.cos(h)*Y)/1e3
v10 = lam**(-1)*(-np.sin(dec)*np.cos(h)*X+np.sin(dec)*np.sin(h)*Y+np.cos(dec)*Z)/1e3
w10 = lam**(-1)*(np.cos(dec)*np.cos(h)*X-np.cos(dec)*np.sin(h)*Y+np.sin(dec)*Z)/1e3
a10=np.sqrt(X**2+Y**2)/lam/1e3 # major axis
b10=a10*np.sin(dec) # minor axis (fixed: 'a' was stale from the previous latitude; use a10)
v010=Z/lam*np.cos(dec)/1e3 # center of ellipse
%matplotlib inline
plotBL.UVellipse(u10,v10,w10,a10,b10,v010)
plotBL.UVellipse(uEQ,vEQ,wEQ,aEQ,bEQ,v0EQ)
H = np.linspace(-6,6,600)*(np.pi/12) #Hour angle in radians
d = 100 #We assume that we have already divided by wavelength
delta = 60*(np.pi/180) #Declination in degrees
u_60 = d*np.cos(H)
v_60 = d*np.sin(H)*np.sin(delta)
RA_sources = np.array([5+30.0/60,5+32.0/60+0.4/3600,5+36.0/60+12.8/3600,5+40.0/60+45.5/3600])
DEC_sources = np.array([60,60+17.0/60+57.0/3600,61+12.0/60+6.9/3600,61+56.0/60+34.0/3600])
Flux_sources_labels = np.array(["","1 Jy","0.5 Jy","0.1 Jy"])  # labels matched to the Flux_sources values below
Flux_sources = np.array([1,0.5,0.1]) #in Jy
step_size = 200
print("Phase center Source 1 Source 2 Source3")
print(repr("RA="+str(RA_sources)).ljust(2))
print("DEC="+str(DEC_sources))
RA_rad = np.array(RA_sources)*(np.pi/12)
DEC_rad = np.array(DEC_sources)*(np.pi/180)
RA_delta_rad = RA_rad-RA_rad[0]
l = np.cos(DEC_rad)*np.sin(RA_delta_rad)
m = (np.sin(DEC_rad)*np.cos(DEC_rad[0])-np.cos(DEC_rad)*np.sin(DEC_rad[0])*np.cos(RA_delta_rad))
print("l=",l*(180/np.pi))
print("m=",m*(180/np.pi))
point_sources = np.zeros((len(RA_sources)-1,3))
point_sources[:,0] = Flux_sources
point_sources[:,1] = l[1:]
point_sources[:,2] = m[1:]
%matplotlib inline
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
plt.xlim([-4,4])
plt.ylim([-4,4])
plt.xlabel("$l$ [degrees]")
plt.ylabel("$m$ [degrees]")
plt.plot(l[0],m[0],"bx")
plt.plot(l[1:]*(180/np.pi),m[1:]*(180/np.pi),"ro")
counter = 1
for xy in zip(l[1:]*(180/np.pi)+0.25, m[1:]*(180/np.pi)+0.25):
ax.annotate(Flux_sources_labels[counter], xy=xy, textcoords='offset points',horizontalalignment='right',
verticalalignment='bottom')
counter = counter + 1
plt.grid()
u = np.linspace(-1*(np.amax(np.abs(u_60)))-10, np.amax(np.abs(u_60))+10, num=step_size, endpoint=True)
v = np.linspace(-1*(np.amax(abs(v_60)))-10, np.amax(abs(v_60))+10, num=step_size, endpoint=True)
uu, vv = np.meshgrid(u, v)
zz = np.zeros(uu.shape).astype(complex)
s = point_sources.shape
for counter in range(1, s[0]+1):
A_i = point_sources[counter-1,0]
l_i = point_sources[counter-1,1]
m_i = point_sources[counter-1,2]
zz += A_i*np.exp(-2*np.pi*1j*(uu*l_i+vv*m_i))
zz = zz[:,::-1]
u_track = u_60
v_track = v_60
z = np.zeros(u_track.shape).astype(complex)
s = point_sources.shape
for counter in range(1, s[0]+1):
A_i = point_sources[counter-1,0]
l_i = point_sources[counter-1,1]
m_i = point_sources[counter-1,2]
z += A_i*np.exp(-1*2*np.pi*1j*(u_track*l_i+v_track*m_i))
plt.figure(figsize=(12,6))
plt.subplot(121)
plt.imshow(zz.real,extent=[-1*(np.amax(np.abs(u_60)))-10, np.amax(np.abs(u_60))+10,-1*(np.amax(abs(v_60)))-10, \
np.amax(abs(v_60))+10])
plt.plot(u_60,v_60,"k")
plt.xlim([-1*(np.amax(np.abs(u_60)))-10, np.amax(np.abs(u_60))+10])
plt.ylim(-1*(np.amax(abs(v_60)))-10, np.amax(abs(v_60))+10)
plt.xlabel("u")
plt.ylabel("v")
plt.title("Real part of visibilities")
plt.subplot(122)
plt.imshow(zz.imag,extent=[-1*(np.amax(np.abs(u_60)))-10, np.amax(np.abs(u_60))+10,-1*(np.amax(abs(v_60)))-10, \
np.amax(abs(v_60))+10])
plt.plot(u_60,v_60,"k")
plt.xlim([-1*(np.amax(np.abs(u_60)))-10, np.amax(np.abs(u_60))+10])
plt.ylim(-1*(np.amax(abs(v_60)))-10, np.amax(abs(v_60))+10)
plt.xlabel("u")
plt.ylabel("v")
plt.title("Imaginary part of visibilities")
plt.figure(figsize=(12,6))
plt.subplot(121)
plt.plot(z.real)
plt.xlabel("Timeslots")
plt.ylabel("Jy")
plt.title("Real: sampled visibilities")
plt.subplot(122)
plt.plot(z.imag)
plt.xlabel("Timeslots")
plt.ylabel("Jy")
plt.title("Imag: sampled visibilities")
plt.figure(figsize=(12,6))
plt.subplot(121)
plt.imshow(abs(zz),
extent=[-1*(np.amax(np.abs(u_60)))-10,
np.amax(np.abs(u_60))+10,
-1*(np.amax(abs(v_60)))-10,
np.amax(abs(v_60))+10])
plt.plot(u_60,v_60,"k")
plt.xlim([-1*(np.amax(np.abs(u_60)))-10, np.amax(np.abs(u_60))+10])
plt.ylim(-1*(np.amax(abs(v_60)))-10, np.amax(abs(v_60))+10)
plt.xlabel("u")
plt.ylabel("v")
plt.title("Amplitude of visibilities")
plt.subplot(122)
plt.imshow(np.angle(zz),
extent=[-1*(np.amax(np.abs(u_60)))-10,
np.amax(np.abs(u_60))+10,
-1*(np.amax(abs(v_60)))-10,
np.amax(abs(v_60))+10])
plt.plot(u_60,v_60,"k")
plt.xlim([-1*(np.amax(np.abs(u_60)))-10, np.amax(np.abs(u_60))+10])
plt.ylim(-1*(np.amax(abs(v_60)))-10, np.amax(abs(v_60))+10)
plt.xlabel("u")
plt.ylabel("v")
plt.title("Phase of visibilities")
plt.figure(figsize=(12,6))
plt.subplot(121)
plt.plot(abs(z))
plt.xlabel("Timeslots")
plt.ylabel("Jy")
plt.title("Abs: sampled visibilities")
plt.subplot(122)
plt.plot(np.angle(z))
plt.xlabel("Timeslots")
plt.ylabel("Jy")
plt.title("Phase: sampled visibilities")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting SAV Imaging Tab-like Metrics
Step2: By default, the run_metrics class loads all the InterOp files.
Step3: The run_metrics class can use this list to load only the required InterOp files as follows
Step4: The column headers for the imaging table can be created as follows
Step5: Convert the columns object to a list of strings.
Step6: Subsample rows and columns
Step7: The data from imaging table can populate a numpy ndarray as follows
Step8: Convert the header list and data ndarray into a Pandas table.
Step9: Render the Imaging Table data using Pandas
Step10: Getting Only Occupancy from the imaging table
Step11: This allows you to select a specific file to load.
Step12: Select only the first row_count rows.
Step13: Select a subset of columns
Step14: Convert to a Pandas DataFrame object
Step15: Only display data from the first cycle
|
<ASSISTANT_TASK:>
Python Code:
run_folder = r""
from interop import py_interop_run_metrics, py_interop_run, py_interop_table
import numpy
import pandas as pd
run_metrics = py_interop_run_metrics.run_metrics()
valid_to_load = py_interop_run.uchar_vector(py_interop_run.MetricCount, 0)
py_interop_table.list_imaging_table_metrics_to_load(valid_to_load, False)
run_metrics.read(run_folder, valid_to_load)
columns = py_interop_table.imaging_column_vector()
py_interop_table.create_imaging_table_columns(run_metrics, columns)
headers = []
for i in range(columns.size()):
column = columns[i]
if column.has_children():
headers.extend([column.name()+"("+subname+")" for subname in column.subcolumns()])
else:
headers.append(column.name())
row_count=3
column_count=7
headers=headers[:column_count]
print(headers)
column_count = py_interop_table.count_table_columns(columns)
row_offsets = py_interop_table.map_id_offset()
py_interop_table.count_table_rows(run_metrics, row_offsets)
data = numpy.zeros((row_offsets.size(), column_count), dtype=numpy.float32)
py_interop_table.populate_imaging_table_data(run_metrics, columns, row_offsets, data.ravel())
data=data[:row_count, :]
d = []
for col, label in enumerate(headers):
d.append( (label, pd.Series([val for val in data[:, col]], index=[tuple(r) for r in data[:, :3]])))
df = pd.DataFrame.from_dict(dict(d))
print(df.to_string(index=False))
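# Illustrative aside (sketch): the assembled table can be saved for offline
# review; the file name here is an assumption.
df.to_csv('imaging_table_subset.csv', index=False)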
valid_to_load = py_interop_run.uchar_vector(py_interop_run.MetricCount, 0)
valid_to_load[py_interop_run.ExtendedTile] = 1
valid_to_load[py_interop_run.Tile] = 1
valid_to_load[py_interop_run.Extraction] = 1
run_metrics.clear()
run_metrics.read(run_folder, valid_to_load)
py_interop_table.create_imaging_table_columns(run_metrics, columns)
headers = []
for i in range(columns.size()):
column = columns[i]
if column.has_children():
headers.extend([column.name()+"("+subname+")" for subname in column.subcolumns()])
else:
headers.append(column.name())
column_count = py_interop_table.count_table_columns(columns)
row_offsets = py_interop_table.map_id_offset()
py_interop_table.count_table_rows(run_metrics, row_offsets)
data = numpy.zeros((row_offsets.size(), column_count), dtype=numpy.float32)
py_interop_table.populate_imaging_table_data(run_metrics, columns, row_offsets, data.ravel())
data=data[:row_count, :]
header_subset = ["Lane", "Tile", "Cycle", "% Occupied"]
header_index = [(header, headers.index(header)) for header in header_subset]
ids = numpy.asarray([headers.index(header) for header in header_subset[:3]])
d = []
for label, col in header_index:
d.append( (label, pd.Series([val for val in data[:, col]], index=[tuple(r) for r in data[:, ids]])))
df = pd.DataFrame.from_dict(dict(d))
df = df.loc[df['Cycle'] == 1.0]
print(df.to_string(index=False))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Kalman filtering
Step2: Results
|
<ASSISTANT_TASK:>
Python Code:
from IPython.html.widgets import interact, interactive, fixed
from IPython.html.widgets import FloatSlider
import numpy as np
import matplotlib.pyplot as plt
import visualizeCO2 as vco2
from CO2simulation import CO2simulation
def plot_CO2plume(time):
import param as param
CO2 = CO2simulation(param)
x = CO2.extract_state(int(time/3))
data = CO2.extract_data(int(time/3))
fig_setting = vco2.getImgParam(param)
vco2.plotCO2_data_map(x, data, 0, 20, fig_setting)
plt.show()
interact(plot_CO2plume,
time = FloatSlider(value=0, min=0, max=120));
# visualizeCO2 was imported above under the alias 'vco2' used throughout
vco2.scale_barplot()
# theta controls the trade-off between data misfit and variance in final estimates
def plot_CO2maps(theta):
import param
CO2 = CO2simulation(param)
param.theta = (theta,1e-5)
# NOTE: the 'simCO2' module (providing CO2_filter) is used here but its
# import was not captured in this notebook; it is assumed to be available.
hikf, x_kf, cov_kf = simCO2.CO2_filter(CO2, param)
fig_setting = vco2.getImgParam(param)
vco2.plotCO2map(x_kf,cov_kf,fig_setting)
plt.show()
print "Theta Variance"
print " %f %f" % (theta,np.sum(cov_kf[-1]))
interact(plot_CO2maps,
theta = FloatSlider(value=1.14, min=1.14e-3, max=1.14e1, step=1));
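# Illustrative sketch: the same trade-off can be inspected without the
# slider by calling plot_CO2maps() over a few theta values directly.
for theta_value in [0.01, 0.1, 1.0, 10.0]:
plot_CO2maps(theta_value)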
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Task 4
Step2: Task 5
Step3: Task 6
Step4: Task 7
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import itertools
from scipy import stats
from statsmodels.stats.descriptivestats import sign_test
from statsmodels.stats.weightstats import zconfint
%pylab inline
data = np.array([49,58,75,110,112,132,151,276,281,362])
pylab.hist(data)
pylab.show()
print '95%% confidence interval for the mean time: [%f, %f]' % zconfint(data)
print "M: %d, p-value: %f" % sign_test(data, 200)
stats.wilcoxon(data - 200)
a = np.array([22,22,15,13,19,19,18,20,21,13,13,15])
b = np.array([17,18,18,15,12,4,14,15,10])
stats.mannwhitneyu(b, a, alternative='less')
stats.mannwhitneyu(a, b, alternative='greater')
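# Illustrative follow-up (sketch): a rank-biserial effect size,
# r = 2*U/(n1*n2) - 1, for the Mann-Whitney comparison above.
u_stat, _ = stats.mannwhitneyu(a, b, alternative='greater')
print "Rank-biserial correlation: %f" % (2.0 * u_stat / (len(a) * len(b)) - 1.0)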
def get_bootstrap_samples(data, n_samples):
indices = np.random.randint(0, len(data), (n_samples, len(data)))
samples = data[indices]
return samples
def stat_intervals(stat, alpha):
boundaries = np.percentile(stat, [100 * alpha / 2., 100 * (1 - alpha / 2.)])
return boundaries
data = pd.read_csv('challenger.txt', sep='\t')
data.head()
a = data[data['Incident']==0]['Temperature'].values
b = data[data['Incident']==1]['Temperature'].values
len(a) + len(b) == data.shape[0]
np.random.seed(0)
a_scores = np.array(map(np.mean, get_bootstrap_samples(a, 1000)))
b_scores = np.array(map(np.mean, get_bootstrap_samples(b, 1000)))
print "95% confidence interval for the difference between medians", stat_intervals(a_scores - b_scores, 0.05)
print "95% confidence interval for the difference between medians", stat_intervals(b_scores - a_scores, 0.05)
def permutation_t_stat_ind(sample1, sample2):
return np.mean(sample1) - np.mean(sample2)
def get_random_combinations(n1, n2, max_combinations):
index = range(n1 + n2)
indices = set([tuple(index)])
for i in range(max_combinations - 1):
np.random.shuffle(index)
indices.add(tuple(index))
return [(index[:n1], index[n1:]) for index in indices]
def permutation_zero_dist_ind(sample1, sample2, max_combinations = None):
joined_sample = np.hstack((sample1, sample2))
n1 = len(sample1)
n = len(joined_sample)
if max_combinations:
indices = get_random_combinations(n1, len(sample2), max_combinations)
else:
indices = [(list(index), filter(lambda i: i not in index, range(n))) \
for index in itertools.combinations(range(n), n1)]
distr = [joined_sample[list(i[0])].mean() - joined_sample[list(i[1])].mean() \
for i in indices]
return distr
def permutation_test(sample1, sample2, max_permutations = None, alternative = 'two-sided'):
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError("alternative not recognized\n"
"should be 'two-sided', 'less' or 'greater'")
t_stat = permutation_t_stat_ind(sample1, sample2)
zero_distr = permutation_zero_dist_ind(sample1, sample2, max_permutations)
if alternative == 'two-sided':
return sum([1. if abs(x) >= abs(t_stat) else 0. for x in zero_distr]) / len(zero_distr)
if alternative == 'less':
return sum([1. if x <= t_stat else 0. for x in zero_distr]) / len(zero_distr)
if alternative == 'greater':
return sum([1. if x >= t_stat else 0. for x in zero_distr]) / len(zero_distr)
np.random.seed(0)
print "p-value: %f" % permutation_test(a, b, max_permutations = 10000)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
data = {'a': [1, 2, 3, 4, 5], 'b': [2, -6, 0, -4, 100]}
df = pd.DataFrame(data)
result = np.where((df.a <= 4) & (df.a > 1), df.b, np.nan)
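# Illustrative aside (sketch): attach the masked column to see which rows
# of 'b' survive the 1 < a <= 4 condition.
df['masked_b'] = result
print(df)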
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: %pylab inline is a Python command that imports the numpy and matplotlib modules. The inline option means that Matplotlib figures will be embedded in the notebook itself rather than shown in a separate graphical window. This is the only possible option here, since your system has no graphical interface.
Step2: This creates a Robot object that can be used to access the motors and sensors. It handles all the low-level communication for you, so you do not need to know the details of the communication protocol to drive a motor. The motors and sensors fields of the Robot are automatically synchronised to match the state of their hardware counterparts.
Step3: Accessing the motors
Step4: As you can see, you get a list of all the motors of your Robot object.
Step5: EXERCISE
Step6: Reading motor values
Step7: The current position of all the motors can be obtained with the instruction below.
Step8: It is important to understand that "poppy.m1.present_position" is automatically updated with the real motor's current position (at 50 Hz).
Step9: Commanding the motors
Step10: QUESTIONS
Step11: In these examples, the motor moves as fast as possible (this is the default behaviour). You can change the motor's maximum speed, which is stored in its moving_speed register
Step12: Now motor m1 cannot move faster than 50 degrees per second. Make it move again to see the difference.
Step13: The main registers are
Step14: You should now be able to move this motor by hand. This will be useful, for example, to program your robot by demonstration (see the corresponding notebook).
Step15: Controlling the motor LEDs
Step16: You can list all the available colours with the following command
Step17: Reading sensors
Step18: Reading sensors works exactly like reading your robot's registers. You can access your sensors via
Step19: Here, we have 2 sensors
Step20: You can retrieve all the existing registers of a sensor
Step21: and grab and display an image from the camera
Step22: As with the motors, sensor values are automatically synchronised in the background with the physical sensor. If you run the previous instructions again, you will get a more recent image.
Step23: High-level behaviours
Step24: These behaviours (or "primitives" in Poppy terminology) can be started, stopped, paused, etc.
Step25: You can make the Poppy Ergo Jr dance for 10 seconds
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
from __future__ import print_function
from poppy.creatures import PoppyErgoJr
poppy = PoppyErgoJr()
poppy.rest_posture.start()
poppy.motors  # fixed: the robot object created above is named 'poppy'
for m in poppy.motors:
print(m.name)
# print("done")
poppy.m1
poppy.m1.present_position
[m.present_position for m in poppy.motors]
poppy.m1.registers  # completion (assumption): list the registers available on this motor
poppy.m1.goal_position = 20
# TO COMPLETE -- an illustrative goal position is filled in below
poppy.m1.goal_position = -20
# TO COMPLETE: set the moving_speed register of motor m1 of your poppy robot to 50
poppy.m1.moving_speed = 50
poppy.m1.goal_position = 90
poppy.m6.compliant = True
poppy.m6.compliant = False
import time
for m in poppy.motors:
time.sleep(0.5)
m.led = 'yellow'
time.sleep(1.0)
m.led = 'off'
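# Added illustrative sketch: turn every motor LED green for one second,
# then switch them all off (reuses the 'time' module imported above).
for m in poppy.motors:
m.led = 'green'
time.sleep(1.0)
for m in poppy.motors:
m.led = 'off'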
from pypot.dynamixel.conversion import XL320LEDColors
print(list(XL320LEDColors))
import cv2
# to use the camera
%matplotlib inline
import matplotlib.pyplot as plt
from hampy import detect_markers
# to detect markers
img = poppy.camera.frame
plt.imshow(img)
# What does the img container hold?
# What does plt.imshow(img) do, and what other command could replace it?
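# Added illustrative sketch -- the hampy call pattern below is an
# assumption (detect_markers was imported above but never used here).
markers = detect_markers(img)
for marker in markers:
print(marker.id, marker.center)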
poppy.sensors
poppy.camera
poppy.camera.registers
%matplotlib inline
import matplotlib.pyplot as plt
img = poppy.camera.frame
plt.imshow(img)
plt.imshow(poppy.camera.frame)
[p.name for p in poppy.primitives]
poppy.tetris_posture.start()
import time
poppy.dance.start()
time.sleep(10)
poppy.dance.stop()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Examples
Step2: Example 1
Step3: Example 2
Step4: Example 3
Step5: Equation
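A sketch of the relation implemented below (notation assumed): along each axis, $u[k] = 1$ when $k \equiv o \pmod{\Delta}$ (with $k$ in range) and $u[k] = 0$ otherwise, where $\Delta$ is the period (delta) and $o$ the offset.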
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
def comb(shape, delta, offset):
shape = np.array(shape)
assert shape.size <= 3
g = np.zeros(shape)
if shape.size == 1:
g[offset::delta] = 1
elif shape.size == 2:
g[offset[0]::delta[0], offset[1]::delta[1]] = 1
elif shape.size == 3:
g[offset[0]::delta[0], offset[1]::delta[1], offset[2]::delta[2]] = 1
return g
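# Quick illustrative check (an added sketch): multiplying an image by a
# comb keeps every delta-th sample -- the usual model of ideal down-sampling.
f = np.arange(25).reshape(5, 5)
print(f * comb((5, 5), (2, 2), (0, 0)))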
testing = (__name__ == "__main__")
if testing:
! jupyter nbconvert --to python comb.ipynb
import numpy as np
import sys,os
import matplotlib.image as mpimg
ia898path = os.path.abspath('../../')
if ia898path not in sys.path:
sys.path.append(ia898path)
import ia898.src as ia
if testing:
u1 = ia.comb(10, 3, 2)
print('u1=',u1)
u2 = ia.comb((10,), 3, 2)
print('u2=',u2)
if testing:
u3 = ia.comb((7,9), (1,2), (0,1))
print('u3=\n',u3)
if testing:
u4 = ia.comb((4,5,9), (2,1,2), (1,0,1))
print(u4)
if testing:
print('testing comb')
print(repr(ia.comb(10, 3, 2)) == repr(np.array(
[0., 0., 1., 0., 0., 1., 0., 0., 1., 0.])))
print(repr(ia.comb((7,9), (3,4), (3,2))) == repr(np.array(
[[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 1., 0., 0.]])))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Twin brothers and bayes theorem
Step2: So, we can conclude that Elvis had a 14.8% chance of being an identical twin of his brother.
Step3: The Dice Problem chapter 3
Step4: Next create a dice object with dice of 4, 6, 8, 12 and 20 sides
Step5: Roll a 6 and see the probabilities of being each dice
Step6: Now roll a series of numbers
Step7: For these rolls, we see that the 8-sided dice is most probable. It is still possible for the 20-sided dice, but only with a 0.1% chance.
Step8: Create train object and update with train number 60
Step9: Plot current probabilities of numbers of trains
Step10: Because 60 is not actually a good guess, we will compute the mean of the posterior distribution
Step11: The mean of the posterior distribution is the value that minimizes error. In simpler terms, we get the smallest number (error) when we subtract the actual number of trains from the mean of posterior distribution.
Step12: After the two updates, the error minimizing value has gone down to 164.
Step15: We initially thought that giving lower numbers of trains higher probabilities would give us a more accurate result. However, over just a few data points, we get a nearly identical graph to the one with linearly represented hypotheses.
Step16: Next create the two hypotheses. As expected, before we see any class arrival data, both watches have equal chances of being worn.
Step17: As a sanity check, suppose we arrive at class exactly on time, and the next time 5 minutes late
Step18: Our model says that both hypotheses still have the same probability; this makes sense because one hypothesis is centered at 0 and the other at -5.
Step19: After this series of updates, we have a slightly increased chance of using the on time watch. This makes sense because on average, the times have been slightly closer to 0 than -5.
|
<ASSISTANT_TASK:>
Python Code:
from thinkbayes2 import Pmf, Suite
import thinkplot
import math
%matplotlib inline
# calculate number of male-male dizygotic twins using the percentage of dizygotic and percentage of male-male
DiMM = 100 * .92 * .25
# calculate number of male-male monozygotic twins using the percentage of monozygotic and percentage of male-male
MoMM = 100 * .08 * .5
# calculate total number of male-male twins
TotalMM = DiMM + MoMM
print("Number of male-male dizygotic twins: {}".format(DiMM))
print("Number of male-male monozygotic twins: {}".format(MoMM))
print("Total number of male-male twins: {}".format(TotalMM))
# next we can calculate the fraction of male-male twins that are monozygotic
fractionMoMM = MoMM / TotalMM
percentMoMM = fractionMoMM * 100
print("Percentage of male-male monozygotic twins: {0:.1f}%".format(percentMoMM))
twins = dict()
# first calculate the total percentage of male-male twins. We can do this by adding the percentage of male-male
# monozygotic and the percentage of male-male dizygotic
twins['male-male'] = (.08*.50 + .92*.25)
twins['male-male|monozygotic'] = (.50)
twins['monozygotic'] = (.08)
print(twins['male-male'])
print(twins['male-male|monozygotic'])
print(twins['monozygotic'])
# now using bayes theorem
temp = twins['male-male|monozygotic'] * twins['monozygotic'] / twins['male-male']
print("P(monozygotic|male-male): {0:.3f}".format(temp))
class Dice(Suite):
def Likelihood(self, data, hypo):
if hypo < data:
return 0
else:
return 1 / hypo
suite = Dice([4, 6, 8, 12, 20])
suite.Update(6)
suite.Print()
for roll in [6, 8, 7, 7, 5, 4]:
suite.Update(roll)
suite.Print()
class Train(Suite):
# hypo is the number of trains
# data is an observed serial number
def Likelihood(self, data, hypo):
if data > hypo:
return 0
else:
return 1 / hypo
hypos = range(1, 1001)
train = Train(hypos)
train.Update(60)
thinkplot.Pdf(train)
def Mean(suite):
total = 0
for hypo, prob in suite.Items():
total += hypo * prob
return total
print(Mean(train))
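# Note: thinkbayes2's Pmf also ships a built-in Mean() method, so (assuming
# the standard API) this should print the same value as the helper above:
print(train.Mean())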
for data in [50, 90]:
train.Update(data)
print(Mean(train))
thinkplot.Pdf(train)
class Train2(Dice):
def __init__(self, hypos, alpha=1.0):
Pmf.__init__(self)
for hypo in hypos:
self.Set(hypo, hypo**(-alpha))
self.Normalize()
hypos2 = range(1, 1001)
train2 = Train2(hypos2)
thinkplot.Pmf(train2)
for data in [50, 60, 90]:
train2.Update(data)
thinkplot.Pmf(train2)
class Watch(Suite):
    """Maps watch hypotheses to probabilities."""
    def f(x, b):
        """Returns an (unnormalized) Gaussian function.

        Args:
            x (int): the primary variable
            b (int): a constant offset used to make fast or slow clocks
        """
return math.exp((-1 * (x-b)**2) / (32))
watch1_probs = dict()
for i in range(-15,15):
watch1_probs[i] = f(i, 0)
watch2_probs = dict()
for i in range(-15,15):
watch2_probs[i] = f(i, -5)
hypotheses = {
'watch 1':watch1_probs,
'watch 2':watch2_probs
}
def __init__(self, hypos):
Pmf.__init__(self)
for hypo in hypos:
self.Set(hypo, 1)
self.Normalize()
def Likelihood(self, data, hypo):
time = self.hypotheses[hypo]
like = time[data]
return like
watches = Watch(['watch 1', 'watch 2'])
watches.Print()
for arrival_time in [0,-5]:
watches.Update(arrival_time)
watches.Print()
for arrival_time in [0,-2,-2,-3,-5]:
watches.Update(arrival_time)
watches.Print()
watches.Update(-11)
watches.Print()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a Dakota instance to perform a centered parameter study with HydroTrend.
Step2: Define the HydroTrend input variables to be used in the parameter study, as well as the initial point in parameter space, the step size, and the range of the variables.
Step3: Define the HydroTrend outputs to be used in the parameter study, as well as the statistics to be calculated from them.
Step4: HydroTrend requires a set of files to run.
Step5: The template file provides the configuration file for HydroTrend, but with all parameter values replaced by variables in the form {parameter_name}. The parameters file provides descriptions, ranges, and default values for all of the parameters represented in the template file. The hypsometry file describes the change in elevation along the river's course from source to sea.
Step6: Next, we must replace the default values for the variables for starting_mean_annual_temperature and total_annual_precipitation with variable names for Dakota to substitute into. The CSDMS Dakota package also includes a routine to do this. Import this routine and use it to create a Dakota template file.
Step7: Associate the Dakota template file and the hypsometry file with the Dakota instance.
Step8: Call the setup method to create files needed by Dakota, then run the experiment.
Step9: Check the output; in particular, the dakota.dat file.
|
<ASSISTANT_TASK:>
Python Code:
from dakotathon import Dakota
d = Dakota(method='centered_parameter_study', plugin='hydrotrend')
d.variables.descriptors = ['starting_mean_annual_temperature', 'total_annual_precipitation']
d.variables.initial_point = [15.0, 2.0]
d.method.steps_per_variable = [2, 5]
d.method.step_vector = [2.5, 0.2]
d.responses.response_descriptors = ['Qs_median', 'Q_mean']
d.responses.response_files = ['HYDROASCII.QS', 'HYDROASCII.Q']
d.responses.response_statistics = ['median', 'mean']
import os
data_dir = os.path.join(os.getcwd(), 'data')
template_file = os.path.join(data_dir, 'hydrotrend.in.tmpl')
parameters_file = os.path.join(data_dir, 'parameters.yaml')
hypsometry_file = os.path.join(data_dir, 'HYDRO0.HYPS')
from dakotathon.plugins.base import write_dflt_file
default_input_file = write_dflt_file(template_file, parameters_file, run_duration=365)
print default_input_file
from dakotathon.plugins.base import write_dtmpl_file
dakota_template_file = write_dtmpl_file(template_file, default_input_file, d.variables.descriptors)
print dakota_template_file
d.template_file = dakota_template_file
d.auxiliary_files = hypsometry_file
d.setup()
d.run()
%cat dakota.dat
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate correlated data
Step2: In a second step, we add 90 purely random Poisson spike trains using the homogeneous_poisson_process() function, such that in total we have 10 spiketrains that exhibit occasional synchronized events, and 90 uncorrelated spike trains.
Step3: Mining patterns with SPADE
Step4: The `patterns` output of the method contains information on the found patterns. In this case, we retrieve the pattern we put into the data
Step5: Lastly, we visualize the found patterns using the function plot_patterns() of the viziphant library. Marked in red are the patterns of order ten injected into the data.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import quantities as pq
import neo
import elephant
import viziphant
np.random.seed(4542)
spiketrains = elephant.spike_train_generation.compound_poisson_process(
rate=5*pq.Hz, A=[0]+[0.98]+[0]*8+[0.02], t_stop=10*pq.s)
len(spiketrains)
for i in range(90):
spiketrains.append(elephant.spike_train_generation.homogeneous_poisson_process(
rate=5*pq.Hz, t_stop=10*pq.s))
patterns = elephant.spade.spade(
spiketrains=spiketrains, binsize=1*pq.ms, winlen=1, min_spikes=3,
n_surr=100,dither=5*pq.ms,
psr_param=[0,0,0],
output_format='patterns')['patterns']
patterns
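# Each entry of `patterns` is a dict describing one detected pattern; listing
# its keys shows what information is available (the exact field names depend
# on the elephant version installed):
if len(patterns) > 0:
    print(sorted(patterns[0].keys()))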
viziphant.spade.plot_patterns(spiketrains, patterns)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Summarize
Step2: Predict what this code will do.
Step3: Predict what this code will do.
Step4: Summarize
Step5: Implement
Step6: Summarize
Step7: Implement
Step8: Summarize
Step9: Summarize
Step10: Predict what this code will do.
Step11: Summarize
|
<ASSISTANT_TASK:>
Python Code:
x = 5
print(x > 2)
x = 5
print(x < 2)
x = 20
print (x > 2)
x = 5
if x > 2:
print(x)
x = 0
if x > 2:
print(x)
x = 0
if x > 2:
print(x)
print("hello")
x = 20
if x < 5:
print(x)
x = 2
if x < 5 and x > 10:
print("condition met")
x = 2
if x < 5 or x > 10:
print("condition met")
x = 2
if not x > 5:
print("condition met")
x = 20
if x < 5 or x > 10:
print("HERE")
x = 5
if x > 2:
print("inside conditional")
print("also inside conditional")
if x < 2:
print("inside a different conditional")
print("not inside conditional")
x = 5
if x > 10:
print("condition 1")
else:
print("condition 2")
x = 1
if x > 1:
print("condition 1")
elif x == 1:
print("condition 2")
else:
print("condition 3")
x = -2
if x > 5:
print("a")
elif x > 0 and x <= 5:
print("b")
elif x > -6 and x <= 0:
print("c")
else:
print("d")
x = 5
if x > 10:
print("condition 1")
elif x < 0:
print("condition 2")
else:
print("condition 3")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load in house sales data
Step2: If we want to do any "feature engineering" like creating new features or adjusting existing ones we should do this directly using the SFrames as seen in the first notebook of Week 2. For this notebook, however, we will work with the existing features.
Step3: Also, copy and paste the predict_output() function to compute the predictions for an entire matrix of features given the matrix and the weights
Step4: To test your feature derivartive run the following
Step5: Gradient Descent
Step6: Visualizing effect of L2 penalty
Step7: Let us split the dataset into training set and test set. Make sure to use seed=0
Step8: In this part, we will only use 'sqft_living' to predict 'price'. Use the get_numpy_data function to get a Numpy versions of your data with only this feature, for both the train_data and the test_data.
Step9: Let's set the parameters for our optimization
Step10: First, let's consider no regularization. Set the l2_penalty to 0.0 and run your ridge regression algorithm to learn the weights of your model. Call your weights
Step11: Compute the RSS on the TEST data for the following three sets of weights
Step12: We need to re-inialize the weights, since we have one extra parameter. Let us also set the step size and maximum number of iterations.
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
sales = graphlab.SFrame('kc_house_data.gl/')
import numpy as np # note this allows us to refer to numpy as np instead
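# The assignment assumes get_numpy_data() and predict_output() were copied in
# from the previous notebook. A minimal sketch of both, following the course
# convention of prepending a constant column for the intercept (treat the
# bodies as an assumption if your earlier notebook differs):
def get_numpy_data(data_sframe, features, output):
    data_sframe['constant'] = 1  # add a constant column to the SFrame
    features = ['constant'] + features  # prepend 'constant' to the features list
    feature_matrix = data_sframe[features].to_numpy()
    output_array = data_sframe[output].to_numpy()
    return (feature_matrix, output_array)

def predict_output(feature_matrix, weights):
    # predictions are simply the dot product of features and weights
    return np.dot(feature_matrix, weights)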
def feature_derivative_ridge(errors, feature, weight, l2_penalty, feature_is_constant):
    # If feature_is_constant is True, derivative is twice the dot product of errors and feature
    if feature_is_constant:
        derivative = 2 * np.dot(errors, feature)
    # Otherwise, derivative is twice the dot product plus 2*l2_penalty*weight
    else:
        derivative = 2 * np.dot(errors, feature) + 2 * l2_penalty * weight
    return derivative
(example_features, example_output) = get_numpy_data(sales, ['sqft_living'], 'price')
my_weights = np.array([1., 10.])
test_predictions = predict_output(example_features, my_weights)
errors = test_predictions - example_output # prediction errors
# next two lines should print the same values
print feature_derivative_ridge(errors, example_features[:,1], my_weights[1], 1, False)
print np.sum(errors*example_features[:,1])*2+20.
print ''
# next two lines should print the same values
print feature_derivative_ridge(errors, example_features[:,0], my_weights[0], 1, True)
print np.sum(errors)*2.
def ridge_regression_gradient_descent(feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations=100):
    weights = np.array(initial_weights)  # make sure it's a numpy array
    iteration = 0
    while iteration < max_iterations:  # while not reached maximum number of iterations
        # compute the predictions based on feature_matrix and weights using your predict_output() function
        predictions = predict_output(feature_matrix, weights)
        # compute the errors as predictions - output
        errors = predictions - output
        for i in xrange(len(weights)):  # loop over each weight
            # Recall that feature_matrix[:,i] is the feature column associated with weights[i]
            # compute the derivative for weight[i].
            # (Remember: when i=0, you are computing the derivative of the constant!)
            derivative = feature_derivative_ridge(errors, feature_matrix[:, i], weights[i], l2_penalty, i == 0)
            # subtract the step size times the derivative from the current weight
            weights[i] = weights[i] - step_size * derivative
        iteration += 1
    return weights
simple_features = ['sqft_living']
my_output = 'price'
train_data,test_data = sales.random_split(.8,seed=0)
(simple_feature_matrix, output) = get_numpy_data(train_data, simple_features, my_output)
(simple_test_feature_matrix, test_output) = get_numpy_data(test_data, simple_features, my_output)
initial_weights = np.array([0., 0.])
step_size = 1e-12
max_iterations=1000
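# The plotting cell below assumes these two weight vectors have already been
# learned. A sketch: learn once with no regularization and once with a high
# penalty (the value 1e11 is an assumption following the assignment's convention).
simple_weights_0_penalty = ridge_regression_gradient_descent(
    simple_feature_matrix, output, initial_weights, step_size, 0.0, max_iterations)
simple_weights_high_penalty = ridge_regression_gradient_descent(
    simple_feature_matrix, output, initial_weights, step_size, 1e11, max_iterations)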
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(simple_feature_matrix,output,'k.',
simple_feature_matrix,predict_output(simple_feature_matrix, simple_weights_0_penalty),'b-',
simple_feature_matrix,predict_output(simple_feature_matrix, simple_weights_high_penalty),'r-')
model_features = ['sqft_living', 'sqft_living15'] # sqft_living15 is the average squarefeet for the nearest 15 neighbors.
my_output = 'price'
(feature_matrix, output) = get_numpy_data(train_data, model_features, my_output)
(test_feature_matrix, test_output) = get_numpy_data(test_data, model_features, my_output)
initial_weights = np.array([0.0,0.0,0.0])
step_size = 1e-12
max_iterations = 1000
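# Sketch: learn the two-feature weights the same way, plus a small helper to
# compute the RSS on the TEST data as the steps above ask for (the penalty
# value 1e11 is again an assumption).
multiple_weights_0_penalty = ridge_regression_gradient_descent(
    feature_matrix, output, initial_weights, step_size, 0.0, max_iterations)
multiple_weights_high_penalty = ridge_regression_gradient_descent(
    feature_matrix, output, initial_weights, step_size, 1e11, max_iterations)

def compute_rss(weights, features, output):
    errors = predict_output(features, weights) - output
    return (errors ** 2).sum()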
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Overview
Step3: machine guns "spreads"
Step4: BR corner
Step5: Parts for the triggers, walls, and guns
Step6: 2. Flying assistant ("ship2")
Step7: ship2's sporadic movement
Step8: ship2's parts list
Step9: 3. "star"-ship (initially frozen)
Step10: 4. Special-effect Ending
Step11: These are the points I made, in the shape of a heart
Step12: Make parts and code for special effects
Step13: Finally, copy each of these to the game editor
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from sys import path
path.append('..')
from zf_function_wrappers import *
from zf_common import *
from zf_macro_functions import *
FAR_AWAY = 9999
BIG = 2000
SIZE_0 = 0
# store all commands here (these go into the "AI, lives" window in http://zetaflow.skylogic.ca/game/edit)
cmds= []
# helper function: just adds one or more commands to the cmds array
def append(cmd):
if (isinstance(cmd, str)):
cmds.append(cmd)
print(cmd)
elif (isinstance(cmd, list)):
cmds.extend(cmd)
print('\n'.join(str(x) for x in cmd))
else:
raise(ValueError('What is this? {} {}'.format(type(cmd), cmd)))
# Part numbers with names (so we can refer to parts by name rather than by number)
parts = {
'killer_circle': 8, # circle that is invincible and placed at center of screen. This is the part that auto-kills the player by expanding in size.
'kill_switch_1': 5,
'kill_switch_2': 6,
'kill_switch_3': 7,
# the RIGHT parts to kill (circles)
'kill_me_first': 9,
'first_replacement': 12,
'second_replacement': 13,
'third_replacement': 14,
# parent parts for the kill switches and shields, and weapons
# These are the 4 "switch1b" in each corner
'corner_UL': 1,
'corner_BL': 3,
'corner_UR': 2,
'corner_BR': 4,
# "spread" machine guns
'BL_spread_spinner': 21, # "jet"
'BR_spread_spinner': 26, # "jet"
'UR_spread_spinner': 35, # "jet"
'BL_spread_0': 15,
'BL_spread_1': 16,
'BL_spread_2': 17,
'BL_spread_3': 18,
'BR_spread_0': 27,
'BR_spread_1': 28,
'BR_spread_2': 29,
'BR_spread_3': 30,
'UR_spread_0': 31,
'UR_spread_1': 32,
'UR_spread_2': 33,
'UR_spread_3': 34,
# walls
'wall_1': 22,
'wall_2': 23,
'wall_3': 24,
'wall_4': 25,
'BR_rocket_1': 36,
'BR_rocket_2': 37,
'BR_rocket_3': 38,
'BR_rocket_4': 39,
'BR_rocket_5': 40,
'BR_rocket_6': 41,
}
kill_switches = [5, 6, 7]
def on_death_kill_player(part_num):
    """When the part dies, auto-kill the player."""
# first, set height and width to fill the whole screen
_cmds = []
_cmds.append(makeTriggerDeath(part_num, setWidth(parts['killer_circle'], BIG)))
_cmds.append(makeTriggerDeath(part_num, setHeight(parts['killer_circle'], BIG)))
# then, after 1 second, revert back
# NEVERMIND - player is doomed at this point! :)
#_cmds.append(makeTriggerDeath(part_num, makeTriggerTime(secToFrames(2), setWidth(parts['killer_circle'], SIZE_0))))
#_cmds.append(makeTriggerDeath(part_num, makeTriggerTime(secToFrames(2), setHeight(parts['killer_circle'], SIZE_0))))
append(_cmds)
# prepare the auto-death triggers
append(makeInvincible(parts['killer_circle']))
for num in kill_switches:
on_death_kill_player(num)
PROPER_DISTANCE = float('{:0.2f}'.format(np.sqrt(200**2+200**2))) # because it's position is 200 away from the core in both dimensions
print(PROPER_DISTANCE)
def disarm(right_part_to_kill, kill_switch_to_remove, replacement_part):
# first, player should destroy RIGHT PART, this will MOVE the next death trigger away and replace it with a destroyable part
append(makeTriggerDeath(right_part_to_kill, setDistance(kill_switch_to_remove, FAR_AWAY)))
# REPLACEMENT part will take over where RIGHT PART was
# first, at beginning it will sit far away
append(setDistance(replacement_part, FAR_AWAY))
# and later it will be moved into place
append(makeTriggerDeath(right_part_to_kill, setDistance(replacement_part, PROPER_DISTANCE)))
# first, player should destroy #9, this will MOVE the next death trigger away and replace it with a destroyable part
disarm(parts['kill_me_first'], parts['kill_switch_3'], parts['first_replacement'])
disarm(parts['first_replacement'], parts['kill_switch_1'], parts['second_replacement'])
disarm(parts['second_replacement'], parts['kill_switch_2'], parts['third_replacement'])
# WALLS
append(makeInvincible([parts['wall_1'], parts['wall_2'], parts['wall_3'], parts['wall_4']]))
f = lambda offset, interval: [float('{:0.2f}'.format(offset + x*interval)) for x in range(4)]
######## BOTTOM CORNERS: SPREADS
on_death = lambda cmds: [makeTriggerDeath(parts['kill_me_first'], cmd) for cmd in cmds]
# BL
for name, offset_seconds in zip([f'BL_spread_{i}' for i in range(4)], f(0, 0.3)):
append(on_death(macro_machineGunOnOff(parts[name], 'spread', off_seconds=10, on_seconds=1, cooldown=3, offset_seconds=offset_seconds)))
append(rotate(parts['BL_spread_spinner'], 2))
# BR
for name, offset_seconds in zip([f'BR_spread_{i}' for i in range(4)], f(2, 0.6)):
append(on_death(macro_machineGunOnOff(parts[name], 'spread', off_seconds=10, on_seconds=1, cooldown=3, offset_seconds=offset_seconds)))
append(rotate(parts['BR_spread_spinner'], 2))
######### TOP RIGHT CORNER: SPREADS
on_death = lambda cmds: [makeTriggerDeath(parts['first_replacement'], cmd) for cmd in cmds]
# UR
for name, offset_seconds in zip([f'UR_spread_{i}' for i in range(4)], f(10, 0.3)):
append(on_death(macro_machineGunOnOff(parts[name], 'spread', off_seconds=5, on_seconds=1, cooldown=2, offset_seconds=offset_seconds)))
append(rotate(parts['UR_spread_spinner'], 2))
# start off disabled
tmp = [parts[f'BL_spread_{i}'] for i in range(4)]
tmp += [parts[f'BR_spread_{i}'] for i in range(4)]
tmp += [parts[f'UR_spread_{i}'] for i in range(4)]
append(disableGun(tmp))
on_death = lambda cmd: makeTriggerDeath(parts['second_replacement'], cmd)
for i in range(1,6+1):
append(on_death(macro_machineGun(parts[f'BR_rocket_{i}'], 'square', cooldown=1)))
append(on_death(enableGun(parts[f'BR_rocket_{i}'])))
append(disableGun(parts[f'BR_rocket_{i}']))
c_red = 'FF0033'
c_frozen = '33CCFF'
c_white = 'FFFFFF'
c_green = '339900'
invis = '0, 0' # 0 xscale and yscale to make it invisible
txt = f'''
core, 400, 300, 0, 0.5, 0.5, -1, 0, {c_frozen}
c, 1, -3, 0, {invis}, e0, 8, FF0000
switch1b, 200, 200, 0, 1, 1, e0, 4, {c_white}
switch1b, -200, 200, 0, 1, 1, e0, 3, {c_white}
switch1b, 200, -200, 0, 1, 1, e0, 2, {c_white}
switch1b, -200, -200, 0, 1, 1, e0, 1, {c_white}
switch2b, -200, -200, 0, 2, 2, e0, 9, {c_red}
switch2b, 200, -200, 0, 2, 2, e0, 5, {c_red}
switch2b, -200, 200, 0, 2, 2, e0, 7, {c_red}
switch2b, 200, 200, 0, 2, 2, e0, 6, {c_red}
switch2b, -200, 200, 0, 2, 2, e0, 12, {c_red}
switch2b, 200, -200, 0, 2, 2, e0, 13, {c_red}
switch2b, 200, 200, 0, 2, 2, e0, 14, {c_red}
'''
# TEMP PART FOR TESTING
#txt += 'oddbox, -80, 24, 0, 1, 1, e0, 999, 0066FF'
# the walls that shield the kill switches
txt += f'''
rect, 20, 20, -45, 5, 1, e{parts['corner_UL']}, 22, {c_green}
rect, 20, -20, -135, 5, 1, e{parts['corner_BL']}, 23, {c_green}
rect, -20, 20, -135, 5, 1, e{parts['corner_UR']}, 24, {c_green}
rect, -20, -20, -45, 5, 1, e{parts['corner_BR']}, 25, {c_green}
'''
# BL, BR, TR corners: parents of guns for rotation
txt += f'''
jet, 0, 0, 0, {invis}, e{parts['corner_BL']}, {parts['BL_spread_spinner']}, 0066FF
jet, 0, 0, 0, {invis}, e{parts['corner_BR']}, {parts['BR_spread_spinner']}, 0066FF
jet, 0, 0, 0, {invis}, e{parts['corner_UR']}, {parts['UR_spread_spinner']}, 0066FF
'''
# and spreads to attach to above
txt += f'''
spread, 0, 50, 0, {invis}, e{parts['BL_spread_spinner']}, {parts['BL_spread_0']}, 666666
spread, 50, 0, 0, {invis}, e{parts['BL_spread_spinner']}, {parts['BL_spread_1']}, 666666
spread, 0, -50, 0, {invis}, e{parts['BL_spread_spinner']}, {parts['BL_spread_2']}, 666666
spread, -50, 0, 0, {invis}, e{parts['BL_spread_spinner']}, {parts['BL_spread_3']}, 666666
spread, 0, 50, 0, {invis}, e{parts['BR_spread_spinner']}, {parts['BR_spread_0']}, 666666
spread, 50, 0, 0, {invis}, e{parts['BR_spread_spinner']}, {parts['BR_spread_1']}, 666666
spread, 0, -50, 0, {invis}, e{parts['BR_spread_spinner']}, {parts['BR_spread_2']}, 666666
spread, -50, 0, 0, {invis}, e{parts['BR_spread_spinner']}, {parts['BR_spread_3']}, 666666
spread, 0, 50, 0, {invis}, e{parts['UR_spread_spinner']}, {parts['UR_spread_0']}, 666666
spread, 50, 0, 0, {invis}, e{parts['UR_spread_spinner']}, {parts['UR_spread_1']}, 666666
spread, 0, -50, 0, {invis}, e{parts['UR_spread_spinner']}, {parts['UR_spread_2']}, 666666
spread, -50, 0, 0, {invis}, e{parts['UR_spread_spinner']}, {parts['UR_spread_3']}, 666666
'''
# rocket for bottom right corner
txt += f'''
square, 0, 0, 135, {invis}, e{parts['wall_4']}, {parts['BR_rocket_1']}, 666666
square, 0, 0, 135, {invis}, e{parts['wall_4']}, {parts['BR_rocket_2']}, 666666
square, 0, 0, 135, {invis}, e{parts['wall_4']}, {parts['BR_rocket_3']}, 666666
square, 0, 0, 135, {invis}, e{parts['wall_4']}, {parts['BR_rocket_4']}, 666666
square, 0, 0, 135, {invis}, e{parts['wall_4']}, {parts['BR_rocket_5']}, 666666
square, 0, 0, 135, {invis}, e{parts['wall_4']}, {parts['BR_rocket_6']}, 666666
'''
print(txt)
copy(txt)
cmds += ['','',''] # some blank lines to show where ship2 code starts
# must be higher than all parts in above sections
STARTING_ID = 100
parts['ship2_core_parent'] = STARTING_ID+7
parts['ship2_propeller1'] = STARTING_ID+5
parts['ship2_propeller2'] = STARTING_ID+6
parts['ship2_tri1'] = STARTING_ID+8
parts['ship2_tri2'] = STARTING_ID+9
parts['ship2_tri3'] = STARTING_ID+20
parts['ship2_laser1'] = STARTING_ID+34
parts['ship2_laser2'] = STARTING_ID+35
ship2_singles = []
for i, num in enumerate([30,31,32,33]):
_id = STARTING_ID+num
parts[f'ship2_single{i}'] = _id
ship2_singles.append(_id)
# make sure ship2 lasers fire often
append(makeTriggerRepeat(0, 75, setGunTime([parts['ship2_laser1'], parts['ship2_laser2']], 49)))
# spinning propellers
append(rotate(parts['ship2_propeller1'], 20))
append(rotate(parts['ship2_propeller2'], -20))
append(rotate(parts['ship2_tri1'], 0.3))
append(rotate(parts['ship2_tri2'], -0.5, 0, 0, 180)) # 180 is so it starts off-screen
append(rotate(parts['ship2_tri3'], 0.5, 0, 90, 0))
append(rotate(parts['ship2_core_parent'], -1, 0, 0, 90)) # 90 rotates ship2 itself (b/c orientation that I made it originally didn't match well to the movement)
# ship2's machine gun fire
append(macro_machineGun(ship2_singles, 'single', cooldown=5))
copy_to_clipboard(cmds)
txt += '\n\n\n'
# in center, rotating part #1
txt += f'tri, 0, 0, 0, 0, 0, e0, {STARTING_ID+8}, CC0000\n'
# rotating part #2
txt += f'tri, 0, -600, 0, 0, 0, e{STARTING_ID+8}, {STARTING_ID+9}, CC0000\n'
# rotating part #3, same position as #2 but different rotate speed
txt += f'tri, 0, 0, 0, 0, 0, e{STARTING_ID+9}, {STARTING_ID+20}, CC0000\n'
# parent of ship2's core
txt += f'switch4b, 0, 400, 90, 0, 0, e{STARTING_ID+20}, {STARTING_ID+7}, FF0000\n'
# ship2's core
txt += f'core, 0, 0, 180, 0.5, 0.5, e{STARTING_ID+7}, {STARTING_ID+1}, FF0033\n'
# ship2 body
txt += f'''
rhombus, 1, 68, 77, 1.21, 0.61, e{STARTING_ID+1}, {STARTING_ID+38}, FF0033
rect, -1, 34, 0, 3.01, 1.01, e{STARTING_ID+38}, {STARTING_ID+39}, FF0033
single, -30, 7, 0, 0.7, 0.7, e{STARTING_ID+39}, {STARTING_ID+33}, FF0033
single, 13, 16, 0, 0.5, 0.5, e{STARTING_ID+39}, {STARTING_ID+32}, FF0033
single, -13, 16, 0, 0.5, 0.5, e{STARTING_ID+39}, {STARTING_ID+31}, FF0033
single, 30, 7, 0, 0.7, 0.7, e{STARTING_ID+39}, {STARTING_ID+30}, FF0033
forcefieldLine, 31, 40, -124, -0.61, 0.5, e{STARTING_ID+39}, {STARTING_ID+28}, FFFFFF
forcefieldLine, -31, 40, 124, 0.61, 0.5, e{STARTING_ID+39}, {STARTING_ID+27}, FFFFFF
oddbox2, -15, 23, -135, -0.5, -0.3, e{STARTING_ID+1}, {STARTING_ID+37}, FF0033
c, -33, 51, 0, 1, 1, e{STARTING_ID+37}, {STARTING_ID+4}, FFFFFF
laser_, 1, 8, 0, 0, 0, e{STARTING_ID+4}, {STARTING_ID+35}, FFFFFF
stick, 0, 0, 0, 0.4, 0.2, e{STARTING_ID+4}, {STARTING_ID+5}, 990000
oddbox2, 15, 23, 135, 0.5, -0.3, e{STARTING_ID+1}, {STARTING_ID+36}, FF0033
c, 34, 51, 0, 1, 1, e{STARTING_ID+36}, {STARTING_ID+3}, FFFFFF
laser_, -2, 8, 0, 0, 0, e{STARTING_ID+3}, {STARTING_ID+34}, FFFFFF
stick, 0, 0, 0, 0.4, 0.2, e{STARTING_ID+3}, {STARTING_ID+6}, 990000
tri, 0, -47, 0, 1.7, 2.6, e{STARTING_ID+1}, {STARTING_ID+26}, FF0033
oddbox2, -17, -6, 135, -0.5, 0.5, e{STARTING_ID+1}, {STARTING_ID+25}, FF0033
oddbox2, 17, -6, -135, 0.5, 0.5, e{STARTING_ID+1}, {STARTING_ID+22}, FF0033
c, 0, 0, 0, 2, 4, e{STARTING_ID+1}, {STARTING_ID+2}, FF99BB
'''
print(txt)
copy(txt)
cmds += ['','',''] # some blank lines to show where this section starts
# must be higher than all parts in above sections
STARSHIP_STARTING_ID = 200
parts['core'] = 0
parts['starship_center'] = STARSHIP_STARTING_ID + 8
starship_ids = (np.array([8,13,12,11,10,9,7,6,4,3,2,16,18,15,17,14])+STARSHIP_STARTING_ID).tolist()
for id in starship_ids:
    parts[f'starship_part_{id}'] = id
append(makeInvincible([parts['core']]+starship_ids))
# raise HP of the star (triangle pieces)
_tmp = (np.array([2,3,4,5,6,7]) + STARSHIP_STARTING_ID).tolist()
append(setHP(_tmp, 10))
append([
makeTriggerDeath(parts['third_replacement'], makeTriggerTime(60, 'waypoints, 1, 3, 100,100, 700,100, 700,500, 100,500')),
makeTriggerDeath(parts['third_replacement'], makeTriggerTime(60, 'randomRotate,20')),
])
# guns start OFF then turn ON when last trigger defeated
starship_guns = (np.array(list(range(9,13+1)) + list(range(14,18+1)))+STARSHIP_STARTING_ID).tolist()
append(disableGun(starship_guns))
append(makeTriggerDeath(parts['third_replacement'], makeTriggerTime(60, enableGun(starship_guns))))
# kill last wall
append(makeTriggerDeath(parts['third_replacement'], destroy(parts['corner_BR'])))
# set star to yellow once last trigger defeated (and remove invincibility)
c_yellow = 'FFCC33'
append(makeTriggerDeath(parts['third_replacement'], makeTriggerTime(60, setColor([parts['core']] + starship_ids, c_yellow))))
append(makeTriggerDeath(parts['third_replacement'], makeTriggerTime(60, makeVulnerable(starship_ids))))
append(makeTriggerDeath(parts['starship_center'], 'waypoints,0,5,400,300'))
append(makeTriggerDeath(parts['starship_center'], stop('randomRotate,20')))
append(makeTriggerDeath(parts['starship_center'], rotate(0,1,-10,10,0)))
# set core to self-destruct after starship defeated
#self_destruct = makeTriggerTime(secToFrames(10), destroy(parts['core']))
#append(makeTriggerDeath(parts['starship_center'], self_destruct))
txt += '\n\n\n'
tmp = f'''
c, 0, 0, 0, 1.1, 1.1, e0, {STARSHIP_STARTING_ID+8}, {c_frozen}
laser_, 47, 17, -87, 0.7, 0.7, e{STARSHIP_STARTING_ID+8}, {STARSHIP_STARTING_ID+13}, {c_frozen}
laser_, -1, 51, 0, 0.7, 0.7, e{STARSHIP_STARTING_ID+8}, {STARSHIP_STARTING_ID+12}, {c_frozen}
laser_, -31, -40, 150, 0.7, 0.7, e{STARSHIP_STARTING_ID+8}, {STARSHIP_STARTING_ID+11}, {c_frozen}
laser_, -47, 17, 87, 0.7, 0.7, e{STARSHIP_STARTING_ID+8}, {STARSHIP_STARTING_ID+10}, {c_frozen}
laser_, 31, -40, -150, 0.7, 0.7, e{STARSHIP_STARTING_ID+8}, {STARSHIP_STARTING_ID+9}, {c_frozen}
tri, 29, 39, -40, 2, 5, e{STARSHIP_STARTING_ID+8}, {STARSHIP_STARTING_ID+7}, {c_frozen}
tri, -40, -16, 119, -2, 5, e{STARSHIP_STARTING_ID+8}, {STARSHIP_STARTING_ID+6}, {c_frozen}
tri, -29, 39, 40, -2, 5, e{STARSHIP_STARTING_ID+8}, {STARSHIP_STARTING_ID+4}, {c_frozen}
tri, 40, -16, -119, 2, 5, e{STARSHIP_STARTING_ID+8}, {STARSHIP_STARTING_ID+3}, {c_frozen}
tri, 0, -46, 180, 2, 5, e{STARSHIP_STARTING_ID+8}, {STARSHIP_STARTING_ID+2}, {c_frozen}
singu, 41, 50, 0, {invis}, e{STARSHIP_STARTING_ID+7}, {STARSHIP_STARTING_ID+16}, 666666
singu, -56, -32, 0, {invis}, e{STARSHIP_STARTING_ID+6}, {STARSHIP_STARTING_ID+18}, 666666
singu, -43, 48, 0, {invis}, e{STARSHIP_STARTING_ID+4}, {STARSHIP_STARTING_ID+15}, 666666
singu, 56, -32, 0, {invis}, e{STARSHIP_STARTING_ID+3}, {STARSHIP_STARTING_ID+17}, 666666
singu, 0, -64, 0, {invis}, e{STARSHIP_STARTING_ID+2}, {STARSHIP_STARTING_ID+14}, 666666
'''
print(tmp)
txt += tmp
import numpy as np
import matplotlib.pyplot as plt
%matplotlib qt
import matplotlib.ticker as plticker
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
ax.set_xlim(0,800)
ax.set_ylim(0,600)
loc = plticker.MultipleLocator(base=800/11) # this locator puts ticks at regular intervals
ax.xaxis.set_major_locator(loc)
plt.grid(True)
coords = []
def onclick(event):
# global ix, iy
ix, iy = event.xdata, event.ydata
# invert
y = 600 - iy
x = ix
print(f'{x:0.1f},{y:0.1f}')
global coords
coords.append((x, y))
# if len(coords) == 2:
# fig.canvas.mpl_disconnect(cid)
ax.scatter([ix], [iy], c='b', s=150)
plt.draw()
return coords
cid = fig.canvas.mpl_connect('button_press_event', onclick)
fig.show()
heart_points = [
(397.4,512.4),
(396.1,487.7),
(385.8,457.7),
(367.7,425.1),
(340.6,382.1),
(320.0,352.1),
(303.2,330.0),
(282.6,311.7),
(259.4,281.8),
(240.0,245.3),
(221.9,204.9),
(221.9,164.5),
(236.1,143.6),
(265.8,126.7),
(303.2,121.5),
(326.5,125.4),
(360.0,146.3),
(396.1,178.8),
(418.1,212.7),
(440.0,191.9),
(452.9,174.9),
(486.5,150.2),
(512.3,130.6),
(545.8,122.8),
(566.5,122.8),
(591.0,145.0),
(603.9,160.6),
(606.5,207.5),
(600.0,236.2),
(583.2,258.3),
(561.3,279.2),
(541.9,317.0),
(529.0,339.1),
(505.8,365.2),
(483.9,387.3),
(465.8,421.2),
(449.0,447.3),
(433.5,468.1),
(419.4,491.6),
]
# must be higher than all parts in above sections
HEART_STARTING_ID = 300
# add break in the text (for help with debugging)
txt += '\n\n\n'
# parent part to hold all the text parts
text_parent_id = HEART_STARTING_ID
txt += f'rightTri, 0, 0, 0, {invis}, e0, {text_parent_id}, 0066FF\n'
# parent will sit far away until needed
append(setDistance(text_parent_id, FAR_AWAY))
# make text parts
_ids = []
for idx, (x, y) in enumerate(heart_points):
new_id = HEART_STARTING_ID+1+idx
txt += f'c, {x-400:0.2f}, {y-300:0.2f}, 0, {invis}, e{text_parent_id}, {new_id}, FFFFFF'
txt += '\n'
_ids.append(new_id)
########### next, make a whole bunch of triggers that all happen "on_death" (meaning, once the final starship is destroyed)
# helper function to make code more succinct
on_death = lambda cmd: append(makeTriggerDeath(parts['starship_center'], cmd))
## change trigger ID, just for testing (without having to beat the whole level)
#on_death = lambda cmd: append(makeTriggerDeath(339, cmd)) # for testing
# first, move into position
on_death(setDistance(text_parent_id, 0))
# then, make the text appear, one circle at a time, and do color effect
delay = 2
init_time_delay = 120
for idx, id in enumerate(_ids):
t = init_time_delay + (idx+1)*delay
# change size from 0 (this makes it suddenly appear on-screen)
grow = setSize(id, 20)
on_death(makeTriggerTime(t, grow[0]))
on_death(makeTriggerTime(t, grow[1]))
# change color, for effect, a split second later
change_color = setColor(id, 'CC0000')
on_death(makeTriggerTime(t+5, change_color))
# when done, end level
on_death(makeTriggerTime(init_time_delay+(idx+1)*delay+5+30, destroy(0)))
copy_to_clipboard(cmds)
print(txt)
copy(txt)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: But as you'll see today, we can accomplish the same thing like this
Step2: ..or this
Step3: These two code blocks are called the while loop and for loop, respectively. Much like the if / else statement, these blocks allow our code to make decisions for us based on some condition. The reason we have two different types of loops is because they are designed with different situations in mind. The for loop is specialized for cases where you know exactly how many times you want to loop in advance, and makes it very easy to "loop over" the elements of almost any iterable object (more on that in a minute). while loops, on the other hand, are more general and are better for when you want to loop indefinitely until some condition is met.
Step4: Any kind of conditional that works with an if statement is also valid for a while loop. However, we have to be careful when we design our loops to make sure that at some point the conditional will become False. If it doesn't, then our code will literally keep looping until the end of the universe (or we manually terminate the program!). This is called an "endless loop" (see the Lesson3 extra material notebook for some examples).
Step5: A more useful example
Step6: Let's break down what's happening here
Step7: Ways of using the for loop
Step8: Or, to give a more useful example, we may want to do something like count the number of times a certain nucleotide appears
Step9: We can also use this as a roundabout way of just looping a certain number of times
Step10: Here, we just ignored the value of char in the loop, and just made use of the fact that the loop would loop a pre-defined number of times (equal to the number of characters in the string).
Step11: Digging down a little deeper, though, what range(x) actually does is create a list of numbers from 0 to x-1
Step12: We haven't talked about lists yet, so don't worry about this too much yet. All you need to know is that a list is an iterable, so we can use it in a for loop. The unit of iteration in a list is the elements of the list. So this means that in addition to just using range() to loop a certain number of times, we can also use it to generate numbers to use in our loop
Step13: for loops in action
Step14: Here we've essentially creating a counter. We tend to want to do a lot of counting in programming, so keep this example in mind.
Step15: This is similar to the counter example above, but instead of incrementing by 1 every time, we're summing up various numbers.
Step16: This is sort of like an accumulator for strings. We can build up a string in a loop by repeatedly concatenating characters to an existing string.
Step17: A little explanation of what this code is doing
Step18: Now that we understand \n... what was going on in the previous example?
Step19: 5. File writing
Step20: If you run this code, it should print a new file called output.txt to your current directory (probably the directory where this notebook is stored). Go and take a look at it now. It should look like this
Step21: 6. Test your understanding
|
<ASSISTANT_TASK:>
Python Code:
import random
print random.randint(0,1)
print random.randint(0,1)
print random.randint(0,1)
print random.randint(0,1)
print random.randint(0,1)
print random.randint(0,1)
print random.randint(0,1)
print random.randint(0,1)
print random.randint(0,1)
print random.randint(0,1)
import random
count = 0
while count < 10:
print random.randint(0,1)
count = count + 1
import random
for i in range(10):
print random.randint(0,1)
x = 0
while x < 4:
print "Looped!"
x = x + 1
x = 0
while x < 2:
print "pizza"
x = x + 1
x = 0
while x < 4:
print x
x = x + 1
x = 0
while x < 4:
x = x + 1
print x
x = 0
while x < 4:
x = x + 1
print x
import random
x = True
while x:
print "x is still True..."
if random.randint(0,4) == 0:
x = False
secretNumber = 56
notGuessed = True
while (notGuessed):
guess = int(raw_input("What number am I thinking of (between 0 and 100)? "))
if (guess == secretNumber):
print "Wow, you got it!"
notGuessed = False
else:
print "Wrong, guess again."
for i in [1, "A", 45, True]:
print i
for i in "Hello!":
print i
for i in range(5):
print i
for nt in "ATGCCTAG":
print nt
sequence = "ATGGTCGATCGGTCGGGCTCGGGATATTACCGCGCGCGCGATGGCTAGGGGGG"
count = 0
for nt in sequence:
if nt == "G":
count = count + 1
print "Found", count, "G's"
stringOfLength5 = "AAAAA"
for char in stringOfLength5:
print "This will print 5 times."
for i in range(5):
print "This will print 5 times."
print range(5)
for i in range(5):
print i
for i in range(4):
print i
for i in range(4):
print i * 2
count = 0
for i in range(4):
count = count + 1
print count
count = 0
for i in range(4):
count = count + i
print count
count = 0
for nt in "CTCCAGGG":
if nt == "C":
count = count + 1
print count
oldSeq = "ATG"
newSeq = ""
for nt in oldSeq:
newSeq = newSeq + nt + "*"
print newSeq
# Read and print genes.txt
fileName = "genes.txt"
inFile = open(fileName, 'r')
for line in inFile:
print line
inFile.close()
print "Hello\nWorld"
print "Hello\n\nWorld"
# Read and print genes.txt
fileName = "genes.txt"
inFile = open(fileName, 'r')
for line in inFile:
line = line.rstrip('\r\n')
print line
inFile.close()
# print some text to a new file
fileName = "output.txt"
outFile = open(fileName, 'w')
outFile.write("This is me,")
outFile.write("printing to \n a file.")
outFile.close()
fileName = "output2.txt"
outFile = open(fileName, 'w')
outFile.write(25)
outFile.close()
# The simple fix: use str()
fileName = "output2.txt"
outFile = open(fileName, 'w')
outFile.write(str(25))
outFile.close()
for i in range(1, 10, 2):
print i
for i in range (5, 1, -1):
print i
count = 0
while (count < 5):
print count
count = count + 1
total = 0
for i in range(4):
total = total + i
print total
name = "Mits"
for letter in name:
print letter
name = "Wilfred"
newName = ""
for letter in name:
newName = newName + letter
print newName
name = "Wilfred"
newName = ""
for letter in name:
newName = letter + newName
print newName
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Find the most common instance name prefixes
Step2: Select the instance name prefix to filter on
Step3: Load the time series data
Step4: Split the data into daily chunks
Step5: Initialize a helper function
Step6: Time-shift all dataframes to line up with the last day
Step7: Compare the CPU utilization day-over-day
Step8: Compare today's CPU Utilization to the weekly average
|
<ASSISTANT_TASK:>
Python Code:
from datalab.stackdriver import monitoring as gcm
# set_datalab_project_id('my-project-id')
import collections
# Initialize the query for CPU utilization over the last week, and read in its metadata.
query_cpu = gcm.Query('compute.googleapis.com/instance/cpu/utilization', hours=7*24)
cpu_metadata = query_cpu.metadata()
# Count the occurrences of each prefix, and display the top 5.
instance_prefix_counts = collections.Counter(
timeseries.metric.labels['instance_name'].rsplit('-', 1)[0]
for timeseries in cpu_metadata)
instance_prefix_counts.most_common(5)
# Set this variable to read data from your own project.
common_prefix = None # 'my-instance-prefix'
if common_prefix is None:
print('No prefix specified. The data will be read from a Cloud Storage bucket.')
else:
print('You selected the prefix: "%s"' % (common_prefix,))
import StringIO
import pandas
import datalab.storage as storage
if common_prefix is None:
print('Reading in data from a Cloud Storage Bucket')
# Initialize the bucket name, and item key.
bucket_name = 'cloud-datalab-samples'
per_zone_data = 'stackdriver-monitoring/timeseries/per-zone-weekly-20161010.csv'
# Load the CSV from the bucket, and intialize the dataframe using it.
per_zone_data_item = storage.Bucket(bucket_name).item(per_zone_data)
per_zone_data_string = StringIO.StringIO(per_zone_data_item.read_from())
per_zone_cpu_data = pandas.DataFrame.from_csv(per_zone_data_string)
else:
print('Reading in data from the Monitoring API')
# Filter the query to instances with the specified prefix.
query_cpu = query_cpu.select_metrics(instance_name_prefix=common_prefix)
# Aggregate to hourly intervals per zone.
query_cpu = query_cpu.align(gcm.Aligner.ALIGN_MEAN, hours=1)
query_cpu = query_cpu.reduce(gcm.Reducer.REDUCE_MEAN, 'resource.zone')
# Get the time series data as a dataframe, with a single-level header.
per_zone_cpu_data = query_cpu.as_dataframe(label='zone')
per_zone_cpu_data.tail(5)
import collections
# Extract the number of days in the dataframe.
num_days = len(per_zone_cpu_data.index)/24
# Split the big dataframe into daily dataframes.
daily_dataframes = [per_zone_cpu_data.iloc[24*i: 24*(i+1)]
for i in xrange(num_days)]
# Reverse the list to have today's data in the first index.
daily_dataframes.reverse()
# Display the last five rows from today's data.
daily_dataframes[0].tail(5)
TODAY = 'Today'
# Helper function to make a readable day name based on offset from today.
def make_day_name(offset):
if offset == 0:
return TODAY
elif offset == 1:
return 'Yesterday'
return '%d days ago' % (offset,)
# Extract the zone names.
all_zones = per_zone_cpu_data.columns.tolist()
# Use the last day's timestamps as the index, and initialize a dataframe per zone.
last_day_index = daily_dataframes[0].index
zone_to_shifted_df = {zone: pandas.DataFrame([], index=last_day_index)
for zone in all_zones}
for i, dataframe in enumerate(daily_dataframes):
# Shift the dataframe to line up with the start of the last day.
dataframe = dataframe.tshift(freq=last_day_index[0] - dataframe.index[0])
current_day_name = make_day_name(i)
# Insert each daily dataframe as a column into the dataframe.
for zone in all_zones:
zone_to_shifted_df[zone][current_day_name] = dataframe[zone]
# Display the first five rows from the first zone.
zone_to_shifted_df[all_zones[0]].head(5)
for zone, dataframe in zone_to_shifted_df.iteritems():
dataframe.plot(title=zone).legend(loc="upper left", bbox_to_anchor=(1,1))
for zone, dataframe in zone_to_shifted_df.iteritems():
# Initialize the dataframe by extracting the column with data for today.
compare_to_avg_df = dataframe.loc[:, [TODAY]]
# Add a column with the weekly avg.
compare_to_avg_df['Weekly avg.'] = dataframe.mean(axis=1)
# Plot this dataframe.
compare_to_avg_df.plot(title=zone).legend(loc="upper left", bbox_to_anchor=(1,1))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup data
Step2: Create a mapping dict from id to word
Step4: Get the reviews file
Step5: The labels are 1 for positive, 0 for negative
Step6: Reduce vocab size by setting rare words to max index
Step7: Let's look at the distribution of the sentences length
Step9: Pad or truncate each sentence to make consistent length of 500
Step10: Create simple models
Step11: Single conv layer with max pooling
Step12: $10304 = 5 \times 32 \times 64 + 64$ — the conv layer's parameter count: 64 filters, each spanning a window of 5 words by 32 embedding dimensions, plus 64 biases.
Step14: Pre-trained vectors
Step15: The glove word ids and imdb word ids use different indexes. So we create a simple function that creates an embedding matrix using the indexes from imdb, and the embeddings from glove (where they exist).
Step16: We pass our embedding matrix to the Embedding constructor, and set it to non-trainable.
Step17: We already have beaten our previous model! But let's fine-tune the embedding weights - especially since the words we couldn't find in glove just have random embeddings
Step18: Multi-size CNN
Step19: How can we further improve?
Step20: We then replace the conv/max-pool layer in our original CNN with the concatenated conv layers
|
<ASSISTANT_TASK:>
Python Code:
from keras.datasets import imdb
idx = imdb.get_word_index()
type(idx)
# Let's look at the word list
sorted(iterable, *, key=None, reverse=False):
built-in function; Return a new sorted list from the items in iterable.
idx_list = sorted(idx, key=idx.get)
print(idx_list[:5])
from itertools import islice
def take(n, iterable):
"Return first n items of the iterable as a list"
return list(islice(iterable, n))
print(take(5, idx.items()))
idx2word = {v:k for k, v in idx.items()}
path = get_file('imdb_full.pkl',
origin='https://s3.amazonaws.com/text-datasets/imdb_full.pkl',
md5_hash='d091312047c43cf9e4e38fef92437263')
"""
get_file(fname, origin, ...):
    keras function; downloads a file from a URL if it is not already in the cache.
"""
f = open(path, 'rb')
(x_train, labels_train), (x_test, labels_test) = pickle.load(f)
print(type(x_train))
print(len(x_train))
# print the 1st review
', '.join(map(str, x_train[0]))
# Let's map the idx to words
' '.join(idx2word[o] for o in x_train[0])
labels_train[:10]
vocab_size = 5000
trn = [np.array([i if i<vocab_size-1 else vocab_size-1 for i in s]) for s in x_train]
test = [np.array([i if i<vocab_size-1 else vocab_size-1 for i in s]) for s in x_test]
lens = np.array(list(map(len, trn)))
(lens.max(), lens.min(), lens.mean())
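# A quick histogram makes the length distribution easier to see than the
# summary statistics alone:
import matplotlib.pyplot as plt
plt.hist(lens, bins=50)
plt.xlabel('review length (words)')
plt.ylabel('count')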
seq_len = 500
"""
keras.preprocessing.sequence.pad_sequences(sequences, maxlen=None, dtype='int32',
                                           padding='pre', truncating='pre', value=0.)

Transform a list of num_samples sequences (lists of scalars) into a 2D Numpy array of shape
(num_samples, num_timesteps). num_timesteps is either the maxlen argument if provided,
or the length of the longest sequence otherwise. Sequences that are shorter than
num_timesteps are padded with value at the end. Sequences longer than num_timesteps are
truncated so that they fit the desired length. The position where padding or truncation
happens is determined by padding or truncating, respectively.
"""
trn = sequence.pad_sequences(trn, maxlen=seq_len, value=0)
test = sequence.pad_sequences(test, maxlen=seq_len, value=0)
trn.shape
model = Sequential([
Embedding(vocab_size, 32, input_length=seq_len),
Flatten(),
Dense(100, activation='relu'),
Dropout(0.7),
Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
model.summary()
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
conv1 = Sequential([
Embedding(vocab_size, 32, input_length=seq_len, dropout=0.2),
Dropout(0.2),
# look at 5 words at a time
Convolution1D(64, 5, border_mode='same', activation='relu'),
Dropout(0.2),
MaxPooling1D(),
Flatten(),
Dense(100, activation='relu'),
Dropout(0.7),
Dense(1, activation='sigmoid')
])
conv1.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
conv1.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=4, batch_size=64)
conv1.summary()
conv1.save_weights(model_path + 'conv1.h5')
conv1.load_weights(model_path + 'conv1.h5')
def get_glove_dataset(dataset):
Download the requested glove dataset from files.fast.ai
and return a location that can be passed to load_vectors.
# see wordvectors.ipynb for info on how these files were
# generated from the original glove data.
md5sums = {'6B.50d': '8e1557d1228decbda7db6dfd81cd9909',
'6B.100d': 'c92dbbeacde2b0384a43014885a60b2c',
'6B.200d': 'af271b46c04b0b2e41a84d8cd806178d',
'6B.300d': '30290210376887dcc6d0a5a6374d8255'}
glove_path = os.path.abspath('data/glove/results')
%mkdir -p $glove_path
return get_file(dataset,
'http://files.fast.ai/models/glove/' + dataset + '.tgz',
cache_subdir=glove_path,
md5_hash=md5sums.get(dataset, None),
untar=True)
def load_vectors(loc):
return (load_array(loc+'.dat'),
pickle.load(open(loc+'_words.pkl','rb'), encoding='latin1'),
pickle.load(open(loc+'_idx.pkl','rb'), encoding='latin1'))
vecs, words, wordidx = load_vectors(get_glove_dataset('6B.50d'))
def create_emb(vecs, vocab_size):
n_fact = vecs.shape[1]
emb = np.zeros((vocab_size, n_fact))
for i in range(1, len(emb)):
word = idx2word[i]
if word and re.match(r"^[a-zA-Z0-9\-]*$", word):
src_idx = wordidx[word]
emb[i] = vecs[src_idx]
else:
# If we can't find the word in glove, randomly initialize
emb[i] = normal(scale=0.6, size=(n_fact,))
# This is our "rare word" id - we want to randomly initialize
emb[-1] = normal(scale=0.6, size=(n_fact,))
emb/=3
return emb
emb = create_emb(vecs, vocab_size)
model = Sequential([
Embedding(vocab_size, 50, input_length=seq_len, dropout=0.2,
weights=[emb]),
Dropout(0.25),
Convolution1D(64, 5, border_mode='same', activation='relu'),
Dropout(0.25),
MaxPooling1D(),
Flatten(),
Dense(100, activation='relu'),
Dropout(0.7),
Dense(1, activation='sigmoid')])
model.layers[1].trainable=False
model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
model.layers[0].trainable = True
model.optimizer.lr = 1e-4
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=4, batch_size=64)
model.save_weights(model_path+'glove50.h5')
from keras.layers import Merge
graph_in = Input((vocab_size, 50))
convs = []
for fsz in range(3, 6):
x = Convolution1D(64, fsz, border_mode='same', activation='relu')(graph_in)
x = MaxPooling1D()(x)
x = Flatten()(x)
convs.append(x)
out = Merge(mode='concat')(convs)
graph = Model(graph_in, out)
emb = create_emb(vecs, vocab_size)
model = Sequential([
Embedding(vocab_size, 50, input_length=seq_len, dropout=0.2, weights=[emb]),
Dropout (0.2),
graph,
Dropout (0.5),
Dense (100, activation="relu"),
Dropout (0.7),
Dense (1, activation='sigmoid')
])
model.layers[1].trainable=False
model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=4, batch_size=64)
model.layers[0].trainable=True
model.optimizer.lr=1e-5
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=4, batch_size=64)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple CNN
Step3: Function to load data
Step5: Function to build network
Step7: Dataset iteration
Step8: Main function
|
<ASSISTANT_TASK:>
Python Code:
# A bit of setup
# Usual imports
import time
import numpy as np
import matplotlib.pyplot as plt
# Notebook plotting magic
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# For auto-reloading external modules
%load_ext autoreload
%autoreload 2
# Deep learning related
import theano
import theano.tensor as T
import lasagne
# My modules
import generate_data as d
def rel_error(x, y):
    """Returns relative error."""
    return np.max(np.abs(x - y) / np.maximum(1e-8, np.abs(x) + np.abs(y)))
def load_dataset(num=5):
Load a bit of data from SALAMI.
Argument: num (number of songs to load. Default=5)
Returns: X_train, y_train, X_val, y_val, X_test, y_test
X, y = d.get_data(num)
# Keep last 6000 data points for test
X_test, y_test = X[-6000:], y[-6000:]
X_train, y_train = X[:-6000], y[:-6000]
# We reserve the last 10000 training examples for validation.
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
# Make column vectors
y_train = y_train[:,np.newaxis]
y_val = y_val[:,np.newaxis]
y_test = y_test[:,np.newaxis]
return X_train, y_train, X_val, y_val, X_test, y_test
def build_cnn(input_var=None):
    """Build the CNN architecture."""
# Make an input layer
network = lasagne.layers.InputLayer(
shape=(
None,
1,
20,
515
),
input_var=input_var
)
# Add a conv layer
network = lasagne.layers.Conv2DLayer(
network, # Incoming
num_filters=32, # Number of convolution filters to use
filter_size=(5, 5),
stride=(1, 1), # Stride fo (1,1)
pad='same', # Keep output size same as input
nonlinearity=lasagne.nonlinearities.rectify, # ReLU
W=lasagne.init.GlorotUniform() # W initialization
)
# Apply max-pooling of factor 2 in second dimension
network = lasagne.layers.MaxPool2DLayer(
network, pool_size=(1, 2)
)
# Then a fully-connected layer of 256 units with 50% dropout on its inputs
network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(network, p=.5),
num_units=256,
nonlinearity=lasagne.nonlinearities.rectify
)
# Finally add a 1-unit softmax output layer
network = lasagne.layers.DenseLayer(
network,
num_units=1,
nonlinearity=lasagne.nonlinearities.softmax
)
return network
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Generate a minibatch.

    Arguments: inputs (numpy array)
               targets (numpy array)
               batchsize (int)
               shuffle (bool, default=False)
    Returns: inputs[excerpt], targets[excerpt]
    """
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
# Theano config
theano.config.floatX = 'float32'
# Load the dataset
print("Loading data...")
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(3)
# Print the dimensions
for datapt in [X_train, y_train, X_val, y_val, X_test, y_test]:
print datapt.shape
# Parse dimensions
n_train = y_train.shape[0]
n_val = y_val.shape[0]
n_test = y_test.shape[0]
n_chan = X_train.shape[1]
n_feats = X_train.shape[2]
n_frames = X_train.shape[3]
print "n_train = {0}".format(n_train)
print "n_val = {0}".format(n_val)
print "n_test = {0}".format(n_test)
print "n_chan = {0}".format(n_chan)
print "n_feats = {0}".format(n_feats)
print "n_frames = {0}".format(n_frames)
# Prepare Theano variables for inputs and targets
input_var = T.tensor4( name='inputs' )
target_var = T.fcol( name='targets' )
# Create neural network model (depending on first command line parameter)
print("Building model and compiling functions..."),
network = build_cnn(input_var)
print("Done.")
# Create a loss expression for training, i.e., a scalar objective we want to minimize
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.squared_error(prediction, target_var)
loss = loss.mean()
# Create update expressions for training
# Here, we'll use adam
params = lasagne.layers.get_all_params(
network,
trainable=True
)
updates = lasagne.updates.adam(
loss,
params
)
# Create a loss expression for validation/testing.
# The crucial difference here is that we do a deterministic forward pass
# through the network, disabling dropout layers.
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.squared_error(test_prediction,
target_var)
test_loss = test_loss.mean()
# As a bonus, also create an expression for the classification accuracy
# (threshold the single sigmoid output at 0.5; argmax over a one-unit axis
# would always return 0):
test_acc = T.mean(T.eq(test_prediction > 0.5, target_var),
                  dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function(
[input_var, target_var],
loss,
updates=updates,
allow_input_downcast=True
)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function(
[input_var, target_var],
[test_loss, test_acc],
allow_input_downcast=True
)
num_epochs = 1
# Finally, launch the training loop.
print("Starting training...")
# We iterate over epochs:
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(X_train, y_train, 500, shuffle=True):
inputs, targets = batch
train_err += train_fn(inputs, targets)
train_batches += 1
# And a full pass over the validation data:
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_val, y_val, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
print("Done training.")
# After training, we compute and print the test error:
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(
test_acc / test_batches * 100))
# Optionally, you could now dump the network weights to a file like this:
# np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
#
# And load them again later on like this:
# with np.load('model.npz') as f:
# param_values = [f['arr_%d' % i] for i in range(len(f.files))]
# lasagne.layers.set_all_param_values(network, param_values)
trained_params = lasagne.layers.get_all_param_values(network)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data loading
Step2: I want to be sure that headers are consistent for all KPI files. I raise an exception if that is not the case.
Step3: I now load the other CSV files as pandas dataframes.
Step4: Data cleaning
Step5: The value field contains sometimes the string "None". For the purposes of this analysis it should be fine to set it to zero. Then I convert both period and value to numeric types. They will be used below during the analysis.
Step6: Check that municipality names of KPIs data match those of simplified municipality indicators (SMIs).
Step7: Data Analysis
Step8: Municipalities with the highest number of cases
Step9: Cases by year
Step10: Histogram no. of cases by municipalities
Step11: Correlation population / no. of cases
Step12: Mean value of KPIs
Step13: Features analysis
Step14: Lasso regression
Step15: Bar chart of importance by feature
Step16: Correlation of SMIs
Step17: I show in the following table the most important correlations, filtering out auto-correlations and other obvious relationships.
Step18: Features clusters as clustermap
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from sklearn import linear_model
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", color_codes=True)
%matplotlib inline
data_dir = './'
kpi_files = [data_dir + kpi for kpi in ['kpis_1998_2003.csv',
'kpis_2004_2008.csv',
'kpis_2009_2011.csv',
'kpis_2012_2013.csv']]
# Check that all headers are the same
curHeaders = None
for csv in kpi_files:
with open(csv, 'r') as f:
for line in f:
if curHeaders is None:
curHeaders = line.strip()
elif curHeaders != line.strip():
raise Exception('KPI headers mismatch')
break
kpis = pd.concat([pd.read_csv(f) for f in kpi_files])
'Total number of KPIs: {}'.format(len(kpis.index))
municipality_indicators = pd.read_csv(data_dir + 'municipality_indicators.csv')
simplified_municipality_indicators = pd.read_csv(data_dir + 'simplified_municipality_indicators.csv')
school_fire_cases = pd.read_csv(data_dir + 'school_fire_cases_1998_2014.csv')
# Rows to be removed will have field 'kpi' equal to string 'kpi', 'period' equal to 'period'
# and so on. One single check on the first attribute should be enough.
kpis = kpis[kpis['kpi'] != 'kpi']
'Total number of KPIs after cleaning: {}'.format(len(kpis.index))
print(kpis.dtypes)
kpis['period'] = kpis['period'].astype(int)
kpis['value'] = kpis['value'].replace(['None'], [0.]).astype(float)
print(kpis.dtypes)
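# Hedged aside (ours, not part of the original pipeline): pd.to_numeric with
# errors='coerce' maps *any* non-numeric string -- not only "None" -- to NaN,
# which fillna(0.) then zeroes out. More robust if other placeholder strings
# ever appear in the raw data. Toy illustration:
example = pd.Series(['1.5', 'None', '3', 'N/A'])
pd.to_numeric(example, errors='coerce').fillna(0.)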
names_simplified_municipality_indicators = set(simplified_municipality_indicators['name'])
names_kpis = set(kpis['municipality_name'])
assert names_simplified_municipality_indicators ^ names_kpis == set()
municipality_types = set(simplified_municipality_indicators['municipalityType'])
'Number of municipality types = {}'.format(len(municipality_types))
total_fire_cases = school_fire_cases['Cases'].sum()
cases_years = school_fire_cases['Year']
print('Number of unique years = {}'.format(len(cases_years.unique())))
period_desc = '{}-{}'.format(cases_years.min(), cases_years.max())
print('Number of total fire cases in period {} = {}'.format(period_desc, total_fire_cases))
print('Total number of municipalities = {}'.format(len(school_fire_cases['Municipality'].unique())))
total_cases_by_municipality = school_fire_cases.groupby('Municipality').sum()['Cases'].sort_values(ascending=False)
max_cases_per_year = school_fire_cases.sort_values(by='Cases', ascending=False) \
.groupby('Year', as_index=False) \
.first()
print('The following municipalities were the ones with the highest number of cases during the period {}:\n{}' \
.format(period_desc, max_cases_per_year['Municipality'].unique()))
piechart_data = total_cases_by_municipality[:20].copy()
others = total_cases_by_municipality[20:]
piechart_data['Others'] = others.sum()
f, ax = plt.subplots(figsize=(11, 4))
plt.axis('equal');
plt.pie(piechart_data, labels=piechart_data.index);
cases_by_year = school_fire_cases.groupby('Year')
f, ax = plt.subplots(figsize=(11, 4))
plt.xlabel('Year')
plt.ylabel('No. of cases')
_ = plt.plot(cases_by_year.sum()['Cases'])
print('Average cases = {}, standard deviation = {}, median = {}, 75th percentile = {}'.format(total_cases_by_municipality.mean(),
total_cases_by_municipality.std(),
total_cases_by_municipality.quantile(.5),
total_cases_by_municipality.quantile(.75)))
f, ax = plt.subplots(figsize=(11, 4))
plt.xlabel('No. of cases')
plt.ylabel('No. of municipalities')
_ = plt.hist(total_cases_by_municipality, bins=100)
population = school_fire_cases['Population']
print('Max population = {}, min population = {}'.format(population.max(), population.min()))
cases = school_fire_cases['Cases']
print('Max cases = {}, min cases = {}'.format(cases.max(), cases.min()))
reg = linear_model.LinearRegression()
features = np.array([[pp] for pp in population.values])
targets = np.array([[cc] for cc in cases.values])
reg.fit(features, targets)
print('Slope = {}, intercept = {}, score (R^2) = {}'.format(reg.coef_[0], reg.intercept_, reg.score(features, targets)))
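# Hedged addition: alongside R^2, the Pearson correlation coefficient and its
# p-value quantify the strength of the linear association directly. scipy is
# assumed to be installed (it is a dependency of scikit-learn).
from scipy.stats import pearsonr
r, p = pearsonr(population, cases)
print('Pearson r = {:.3f}, p-value = {:.2e}'.format(r, p))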
f, ax = plt.subplots(figsize=(11, 4))
plt.xlim([0, 1000000])
plt.ylim([0,60])
plt.scatter(population, cases)
_ = plt.plot(features, reg.predict(features), color='r')
kpis_by_municipality = kpis['value'].groupby(kpis['municipality_name'])
kpis_by_period = kpis['value'].groupby(kpis['period'])
f, ax = plt.subplots(figsize=(11, 4))
plt.xlabel('Year')
plt.ylabel('Mean KPI value')
_ = plt.plot(kpis_by_period.mean())
mun_indicators_features_list = ['medianIncome',
'youthUnemployment2010',
'youthUnemployment2013',
'unemploymentChange',
'reportedCrime',
'populationChange',
'hasEducation',
'asylumCosts',
'urbanDegree',
'foreignBorn',
'reportedCrimeVandalism',
'youngUnskilled',
'latitude',
'longitude',
'population',
'populationShare65plus',
'refugees',
'rentalApartments',
'fokusRanking',
'foretagsklimatRanking',
'cars',
'motorcycles',
'tractors',
'snowmobiles']
mun_indicators_features = simplified_municipality_indicators.loc[:, mun_indicators_features_list].as_matrix()
y_cases = [total_cases_by_municipality[m] for m in simplified_municipality_indicators['name']]
lasso = linear_model.Lasso(alpha=0.1)
lasso.fit(mun_indicators_features, y_cases)
lasso.coef_
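# Hedged note: Lasso applies the same penalty to every coefficient, so features
# with large numeric ranges (e.g. population vs. urbanDegree) dominate unless
# standardized first. A minimal sketch of the scaled variant -- the variable
# names below are ours, not from the original analysis.
from sklearn.preprocessing import StandardScaler
scaled_features = StandardScaler().fit_transform(mun_indicators_features)
lasso_scaled = linear_model.Lasso(alpha=0.1)
lasso_scaled.fit(scaled_features, y_cases)
lasso_scaled.coef_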
features_by_coef = sorted(zip(mun_indicators_features_list, lasso.coef_), key=lambda tup: tup[1], reverse=True)
chart_x = [t[0] for t in features_by_coef]
chart_y = [t[1] for t in features_by_coef]
f, ax = plt.subplots(figsize=(11, 4))
plt.xticks(range(len(chart_x)), chart_x)
locs, labels = plt.xticks()
plt.setp(labels, rotation=90)
_ = ax.bar(range(len(chart_x)), chart_y, 0.3, color="blue")
indicators_and_cases = simplified_municipality_indicators.loc[:, mun_indicators_features_list]
cor_mat = simplified_municipality_indicators.loc[:, mun_indicators_features_list].corr()
f, ax = plt.subplots(figsize=(15, 12))
sns.heatmap(cor_mat,linewidths=.5, ax=ax);
threshold = 0.7
important_corrs = (cor_mat[abs(cor_mat) > threshold][cor_mat != 1.0]) \
.unstack().dropna().to_dict()
unique_important_corrs = pd.DataFrame(
list(set([(tuple(sorted(key)), important_corrs[key]) \
for key in important_corrs])), columns=['attribute pair', 'correlation'])
# sorted by absolute value
unique_important_corrs = unique_important_corrs.ix[
abs(unique_important_corrs['correlation']).argsort()[::-1]]
unique_important_corrs
# See https://www.kaggle.com/cast42/santander-customer-satisfaction/exploring-features
import matplotlib.patches as patches
from scipy.cluster import hierarchy
from scipy.stats.mstats import mquantiles
from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn.preprocessing import scale
from sklearn.preprocessing import StandardScaler
# scale to mean 0, variance 1
train_std = pd.DataFrame(scale(indicators_and_cases))
train_std.columns = indicators_and_cases.columns
m = train_std.corr()
l = linkage(m, 'ward')
mclust = sns.clustermap(m,
linewidths=0,
cmap=plt.get_cmap('RdBu'),
vmax=1,
vmin=-1,
figsize=(14, 14),
row_linkage=l,
col_linkage=l)
# http://stackoverflow.com/a/34697479/297313
_ = plt.setp(mclust.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
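# Hedged follow-up (ours): the linkage 'l' computed above can be cut into flat
# clusters with scipy's fcluster; t=4 clusters is an arbitrary illustrative
# choice, not a result of the original study.
from scipy.cluster.hierarchy import fcluster
flat_clusters = fcluster(l, t=4, criterion='maxclust')
pd.Series(flat_clusters, index=m.columns).sort_values()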
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We'll read in the data using pandas and look at the first 5 rows of the dataframe with the dataframe-specific function .head(). Whenever I read a new table or modify a dataframe, I ALWAYS look at it to make sure it was correctly imported and read in, and I want you to get into the same habit.
Step2: That's kind of annoying ... we don't see all the samples.
Step3: Now we can see all the samples!
Step4: Wow, ~28k rows! That must be the genes, while there are 18 single cell samples and 3 pooled samples as the columns. We'll do some filtering in the next few steps.
Step5: Let's transpose this matrix so the samples are the rows, and the features are the columns. We'll do that with .T
Step6: Now we'll do some mild data cleaning. Notice that the columns have the exclamation point at the beginning, so let's get rid of that. In computer science, you keep letters between quotes, and you call those "strings." Let's talk about the string function .strip(). This removes any characters that are on the outer edges of the string. For example, let's take the string "Whoooo!!!!!!!"
Step7: Now let's remove the exclamation points
Step8: Exercise 1
Step9:
Step10: We can access the column names with dataframe.columns, like below
Step11: We can map the stripping function to every item of the columns. In Python, the square brackets ([ and ]) show that we're making a list. What we're doing below is called a "list comprehension."
Step12: In pandas, we can do the same thing by map-ping a lambda, which is a small, anonymous function that does one thing. It's called "anonymous" because it doesn't have a name. map runs the function on every element of the columns.
Step13: The above lambda is the same as if we had written a named function called remove_exclamation, as below.
Step14: Now we can assign the new column names to our matrix
Step15: Okay, now we're ready to do some analysis!
Step16: To specify a certain number of rows, put a number between the parentheses.
Step17: Exercise 2
Step18:
Step19: Let's get a sense of this data by plotting the distributions using boxplot from seaborn. To save the output, we'll need to get access to the current figure, and save this to a variable using plt.gcf(). And then we'll save this figure with fig.savefig("filename.pdf"). You can use other extensions (e.g. ".png", ".tiff" and it'll automatically save as that forma)
Step20: Notice the 140,000 maximum ... Oh right we have expression data and the scales are enormous... Let's add 1 to all values and take the log2 of the data. We add one because log(0) is undefined and then all our logged values start from zero too. This "$\log_2(TPM + 1)$" is a very common transformation of expression data so it's easier to analyze.
Step21: Exercise 3
Step22: What's nice about booleans is that False is 0 and True is 1, so we can sum to get the number of "Trues." This is a simple, clever way that we can filter on a count for the data. We could use this boolean dataframe to filter our original dataframe, but then we lose information. For all values that are greater than or equal to 2, it puts in a "not a number" - "NaN."
Step23: Exercise 4
Step24:
Step25: The crude filtering above is okay, but we're smarter than that. We want to use the filtering in the paper
Step26: pandas is column-oriented and by default, it will give you a sum for each column. But we want a sum for each row. How do we do that?
Step27: Now we can apply ANOTHER filter and find genes that are "present" (expression greater than 10) in at least 5 samples. We'll save this as the variable genes_of_interest. Notice that this doesn't the genes_of_interest but rather the list at the bottom. This is because what you see under a code cell is the output of the last thing you called. The "hash mark"/"number sign" "#" is called a comment character and makes the rest of the line after it not read by the Python language.
Step28: Getting only rows that you want (aka subsetting)
Step29: Wow, our matrix is very small - 197 genes! We probably don't want to filter THAT much... I'd say a range of 5,000-15,000 genes after filtering is a good ballpark. Not too big so it's impossible to work with but not too small that you can't do any statistics.
Step30:
Step31: Just for fun, let's see how the distributions in our expression matrix have changed. If you want to save the figure, you can
Step32: Discussion
Step33: We'll access the columns we want using this bracket notation (note that this only works for columns, not rows)
Step34: We could do the same thing using .loc but we would need to put a colon ":" in the row position to select all rows.
Step35: Exercise 7
Step36:
Step37: Using two different dataframes for filtering
Step38:
Step39: Let's make a boxplot again to see how the data has changed.
Step40: This is much nicer because now we don't have so many zeros and each sample has a reasonable dynamic range.
Step41: Pretty funky looking huh? That's why we logged it
Step42: Hmm, our Pearson correlation increased from 0.62 to 0.64. Why could that be?
|
<ASSISTANT_TASK:>
Python Code:
# Alphabetical order is standard
# We're doing "import superlongname as abbrev" for our laziness - this way we don't have to type out the whole thing each time.
# Python plotting library
import matplotlib.pyplot as plt
# Numerical python library (pronounced "num-pie")
import numpy as np
# Dataframes in Python
import pandas as pd
# Statistical plotting library we'll use
import seaborn as sns
# This is necessary to show the plotted figures inside the notebook -- "inline" with the notebook cells
%matplotlib inline
# Read the data table
# You may need to change the path to the file (what's in quotes below) relative
# to where you downloaded the file and where this notebook is
shalek2013_expression = pd.read_table('/home/ecwheele/cshl2017/GSE41265_allGenesTPM.txt.gz',
# Sets the first (Python starts counting from 0 not 1) column as the row names
index_col=0,
# Tells pandas to decompress the gzipped file
compression='gzip')
print(shalek2013_expression.shape)
shalek2013_expression.head()
pd.options.display.max_columns = 50
pd.options.display.max_rows = 50
shalek2013_expression.head()
shalek2013_expression.shape
shalek2013_metadata = pd.read_table('/home/ecwheele/cshl2017/GSE41265_series_matrix.txt.gz',
compression = 'gzip',
skiprows=33,
index_col=0)
print(shalek2013_metadata.shape)
shalek2013_metadata
shalek2013_metadata = shalek2013_metadata.T
shalek2013_metadata
"Whoooo!!!!!!!"
'Whoooo!!!!!!!'.strip('!')
# YOUR CODE HERE
'Whoooo!!!!!!!'.strip('o')
'Whoooo!!!!!!!'.replace("o","")
shalek2013_metadata.columns
[x.strip('!') for x in shalek2013_metadata.columns]
shalek2013_metadata.columns.map(lambda x: x.strip('!'))
def remove_exclamation(x):
return x.strip('!')
shalek2013_metadata.columns.map(remove_exclamation)
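# Hedged aside: a reasonably recent pandas also exposes vectorized string
# methods directly on the columns Index, so the same cleanup works without
# map() or a lambda.
shalek2013_metadata.columns.str.strip('!')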
shalek2013_metadata.columns = shalek2013_metadata.columns.map(lambda x: x.strip('!'))
shalek2013_metadata.head()
shalek2013_expression.head()
shalek2013_expression.head(8)
# YOUR CODE HERE
shalek2013_expression.head(17)
sns.boxplot(shalek2013_expression)
# gcf = Get current figure
fig = plt.gcf()
fig.savefig('shalek2013_expression_boxplot.pdf')
expression_logged = np.log2(shalek2013_expression+1)
expression_logged.head()
sns.boxplot(expression_logged)
# gcf = Get current figure
fig = plt.gcf()
fig.savefig('expression_logged_boxplot.pdf')
at_most_2 = expression_logged < 2
at_most_2
expression_at_most_2 = expression_logged[expression_logged < 2]
print(expression_at_most_2.shape)
expression_at_most_2.head()
# YOUR CODE HERE
expression_logged.head()
expression_greater_than_5 = expression_logged[expression_logged > 5]
expression_greater_than_5.head()
(expression_logged > 10).sum()
(expression_logged > 10).sum(axis=1)
genes_of_interest = (expression_logged > 10).sum(axis=1) >= 5
#genes_of_interest
[1, 2, 3]
expression_filtered = expression_logged.loc[genes_of_interest]
print(expression_filtered.shape) # shows (nrows, ncols) - like in manhattan you do the Street then the Avenue
expression_filtered.head()
# YOUR CODE HERE
print(expression_filtered_by_all_samples.shape)
expression_filtered_by_all_samples.head()
genes_of_interest = (expression_logged > 1).sum(axis=1) >= 3
expression_filtered_by_all_samples = expression_logged.loc[genes_of_interest]
print(expression_filtered_by_all_samples.shape)
expression_filtered_by_all_samples.head()
sns.boxplot(expression_filtered_by_all_samples)
# gcf = Get current figure
fig = plt.gcf()
fig.savefig('expression_filtered_by_all_samples_boxplot.pdf')
pooled_ids = [x for x in expression_logged.columns if x.startswith('P')]
pooled_ids
pooled = expression_logged[pooled_ids]
pooled.head()
expression_logged.loc[:, pooled_ids].head()
# YOUR CODE HERE
print(singles.shape)
singles.head()
single_ids = [x for x in expression_logged.columns if x.startswith('S')]
singles = expression_logged[single_ids]
print(singles.shape)
singles.head()
# YOUR CODE HERE
print(expression_filtered_by_singles.shape)
expression_filtered_by_singles.head()
rows = (singles > 1).sum(axis=1) > 3
expression_filtered_by_singles = expression_logged.loc[rows]
print(expression_filtered_by_singles.shape)
expression_filtered_by_singles.head()
sns.boxplot(expression_filtered_by_singles)
fig = plt.gcf()
fig.savefig('expression_filtered_by_singles_boxplot.pdf')
sns.jointplot(shalek2013_expression['S1'], shalek2013_expression['S2'])
sns.jointplot(expression_logged['S1'], expression_logged['S2'])
sns.jointplot(expression_filtered_by_singles['S1'], expression_filtered_by_singles['S2'])
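# Hedged check (ours, not from the original notebook): Spearman correlation is
# rank-based, hence unaffected by the log transform, so comparing it before and
# after filtering isolates the effect of filtering from that of logging.
print(shalek2013_expression['S1'].corr(shalek2013_expression['S2'], method='spearman'))
print(expression_filtered_by_singles['S1'].corr(expression_filtered_by_singles['S2'], method='spearman'))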
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load all the Python modules we will use for the analysis. Note that both RADICAL Utils and RADICAL Pilot need to be loaded alongside RADICAL Analytics.
Step2: We configure matplotlib to produce visually consistent diagrams that are readable and can be included directly in a paper written in LaTeX.
Step3: Experiment Settings
Step4: Analysis
Step5: Entities
Step6: We choose 'session', 'pilot', and 'unit'. At the moment, we do not need 'umgr', 'pmgr', 'update' as we want to measure and compare the overall duration of each session and the lifespan of pilots and units. Depending on the results of our analysis, we may want to extend these measurements and comparisons also to the RP managers.
Step7: We also add a column to the session dataframe with the number of units for each session.
Step8: We now have all the data to plot the TTC of all the experiments runs. We plot the runs of Experiment 1 on the left in blue and those of Experiment 2 on the right in orange.
Step9: We see
Step10: The variations among TTC of workloads with the same number of CUs and between the two experiments are marked. Here we create a table with various measures of this variation for all the experiments and workload sizes.
Step11: For 8 and 16 CUs, the TTC of the second experiment shows a mean twice as large as that of the first experiment. Less pronounced is the difference for the mean of the TTC of 32 CUs. The mean of the TTC of the first experiment is 25% smaller than that of the second experiment for 64 CUs.
Step12: We notice that STD/mean among repetitions of the same run in Experiment 1 goes from 6.55% to 19.77%, increasing with the increase of the number of CUs. In Experiment 2, STD/mean goes from 20.63% up to 56.18%, independently from the amount of CUs executed by the repeated run. This shows
Step13: Pilot State Durations
Step14: Total Time Queueing (TTQ)
Step15: Across runs and experiments, TTQ is
Step16: Even with an average of just 4 runs for each workload size, TTQ variance is relatively small with a couple of exceptions, as shown by STD/mean
Step17: As with TTC, more experiments and runs are needed. Importantly, due to the dynamic variables of OSG behavior (e.g., number and type of resources available to the broker at any given point in time), it would be useful to perform the runs sequentially so as to collect data (relatively) independent of these dynamics; or characterize these dynamics with long-term measurements taken at discrete intervals.
Step18: We calculate and add the name of the resource (hostID) on which the pilot (agent) has become active to the pilots DataFrame. Often, the hostID recorded by RADICAL-Pilot is not the public name of the resource on which the pilot becomes active but, instead, the name of a working/compute node/unit of that resource. We use a heuristic to isolate the portion of the hostID string that is common to all the nodes/units of the same resource. It should be noted, though, that in some cases this is not possible.
Step19: We use this heuristic with the pilots DataFrame to which we add two columns
Step20: We plot the frequency of Tq for both experiments as histograms. Ideally, this should be the first step towards the definition of the characteristic distribution of Tq. Part of this characterization will be to study how stable this distribution is across time, i.e., between two experiments executed at different points in time. As we know that the pool of resources of OSG is both heterogeneous and dynamic, we expect this distribution not to be stable across time due to the potentially different pool of resources available at any point in time.
Step21: The diagrams hint at bimodal distributions; more measurements are required to study this further.
Step22: As expected, TTR is largely equivalent to TTC-TTQ. This tells us that we will have to investigate the time spent describing, binding, scheduling, and executing CUs, measuring whether pilots' TTR is spent effectively running CUs or managing them. Also, we will have to measure how much time is spent staging data in and out of the resources. In these experiments, data staging was not performed so we will limit our analysis to the execution time of CUs.
Step23: We add the number of pilots that became active in each run to the plot we used above to show TTQ, TTR, and TTC.
Step24: As expected, the largest differences we observed in TTC and TTR among the runs with the same number of CUs and among experiments map to the number of pilots used to execute CUs. Our analysis shows that the two experiments have a different number of independent variables. Any comparison has to take into account whether the measure observed depends on the number of active pilots used to execute CUs.
Step25: Unit Durations
Step26: Total Time eXecuting (TTX)
Step27: The diagram confirms the similarity between the size of TTR and TTX. More analytically
Step28: We now have to characterize the variation among TTX of the runs with the same number of CUs and between the two experiments. We plot just TTX focusing on these variations.
Step29: We notice large variations both within and across experiments. Specifically
Step30: Variation within Experiment 1 is between 7 and 18% of the mean, increasing almost proportionally with the number of units executed. Variation in Experiment 2 is more pronounced, ranging from 25 to 78% of the mean. Clearly, these values are not indicative.
Step31: Experiment 2 shows better concurrency than Experiment 1. This might be due to the evolution of the RADICAL Pilot code or more performant resources used by the runs of Experiment 2. These data show at least three elements that need further development
Step32: Note the NaN value for IF_AGENT_SCHEDULING and OF_UMGR_SCHEDULING. The timestamp of these states is broken in the RADICAL Pilot branch used for these experiments. Without analytics it would be difficult to spot and/or understand the error.
Step33: Measures of Spread
Step34: Skewness and Kurtosis
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
import sys
import glob
import pprint
import numpy as np
import scipy as sp
import pandas as pd
import scipy.stats as sps
import statsmodels.api as sm
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.ticker as ticker
import matplotlib.gridspec as gridspec
import radical.utils as ru
import radical.pilot as rp
import radical.analytics as ra
from IPython.display import display
# Global configurations
# ---------------------
# Use LaTeX and its body font for the diagrams' text.
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.serif'] = ['Nimbus Roman Becker No9L']
# Use thinner lines for axes to avoid distractions.
mpl.rcParams['axes.linewidth'] = 0.75
mpl.rcParams['xtick.major.width'] = 0.75
mpl.rcParams['xtick.minor.width'] = 0.75
mpl.rcParams['ytick.major.width'] = 0.75
mpl.rcParams['ytick.minor.width'] = 0.75
# Do not use a box for the legend to avoid distractions.
mpl.rcParams['legend.frameon'] = False
# Helpers
# -------
# Use coordinated colors. These are the "Tableau 20" colors as
# RGB. Each pair is strong/light. For a theory of color see:
# http://www.tableau.com/about/blog/2016/7/colors-upgrade-tableau-10-56782
# http://tableaufriction.blogspot.com/2012/11/finally-you-can-use-tableau-data-colors.html
tableau20 = [(31 , 119, 180), (174, 199, 232), # blue [ 0,1 ]
(255, 127, 14 ), (255, 187, 120), # orange [ 2,3 ]
(44 , 160, 44 ), (152, 223, 138), # green [ 4,5 ]
(214, 39 , 40 ), (255, 152, 150), # red [ 6,7 ]
(148, 103, 189), (197, 176, 213), # purple [ 8,9 ]
(140, 86 , 75 ), (196, 156, 148), # brown [10,11]
(227, 119, 194), (247, 182, 210), # pink [12,13]
(127, 127, 127), (199, 199, 199), # gray [14,15]
(188, 189, 34 ), (219, 219, 141), # yellow [16,17]
(23 , 190, 207), (158, 218, 229)] # cyan [18,19]
# Scale the RGB values to the [0, 1] range, which is the format
# matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
# Return a single plot without right and top axes
def fig_setup():
fig = plt.figure(figsize=(13,7))
ax = fig.add_subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
return fig, ax
def load_data(rdir):
sessions = {}
experiments = {}
start = rdir.rfind(os.sep)+1
for path, dirs, files in os.walk(rdir):
folders = path[start:].split(os.sep)
if len(path[start:].split(os.sep)) == 2:
sid = os.path.basename(glob.glob('%s/*.json' % path)[0])[:-5]
if sid not in sessions.keys():
sessions[sid] = {}
sessions[sid] = ra.Session(sid, 'radical.pilot', src=path)
experiments[sid] = folders[0]
return sessions, experiments
# Load experiments' dataset into ra.session objects
# stored in a DataFrame.
rdir = 'data/'
sessions, experiments = load_data(rdir)
sessions = pd.DataFrame({'session': sessions,
'experiment': experiments})
# Check the first/last 3 rows
display(sessions.head(3))
display(sessions.tail(3))
os.environ['RADICAL_ANALYTICS_VERBOSE']='ERROR'
for s in sessions['session']:
s.consistency(['state_model','timestamps'])
expment = None
pexpment = None
for sid in sessions.index:
etypes = sessions.ix[sid, 'session'].list(['etype'])
expment = sessions.ix[sid, 'experiment']
if expment != pexpment:
print '%s|%s|%s' % (expment, sid, etypes)
pexpment = expment
for sid in sessions.index:
sessions.ix[sid, 'TTC'] = sessions.ix[sid, 'session'].ttc
display(sessions[['TTC']].head(3))
display(sessions[['TTC']].tail(3))
for sid in sessions.index:
sessions.ix[sid, 'nunits'] = len(sessions.ix[sid, 'session'].filter(etype='unit', inplace=False).get())
display(sessions[['nunits']].head(3))
display(sessions[['nunits']].tail(3))
fig = plt.figure(figsize=(13,14))
fig.suptitle('TTC XSEDE OSG Virtual Cluster', fontsize=14)
plt.subplots_adjust(wspace=0.3, top=0.85)
ttc_subplots = []
for exp in sessions['experiment'].sort_values().unique():
ttc_subplots.append(sessions[ sessions['experiment'] == exp ].sort_values('TTC'))
colors = {'exp1': [tableau20[19]],
'exp2': [tableau20[7] ],
'exp3': [tableau20[13]],
'exp4': [tableau20[17]],
'exp5': [tableau20[15]]}
ax = []
for splt in range(4):
session = ttc_subplots.pop(0)
experiment = session['experiment'].unique()[0]
ntasks = ', '.join([str(int(n)) for n in session['nunits'].unique()])
color = colors[experiment]
title = 'Experiment %s\n%s tasks; %s sessions.' % (experiment[3], ntasks, session.shape[0])
if not ax:
ax.append(fig.add_subplot(2, 2, splt+1))
else:
ax.append(fig.add_subplot(2, 2, splt+1, sharey=ax[0]))
session['TTC'].plot(kind='bar', color=color, ax=ax[splt], title=title)
ax[splt].spines["top"].set_visible(False)
ax[splt].spines["right"].set_visible(False)
ax[splt].get_xaxis().tick_bottom()
ax[splt].get_yaxis().tick_left()
ax[splt].set_xticklabels([])
ax[splt].set_xlabel('Sessions')
ax[splt].set_ylabel('Time (s)')
ax[splt].legend(bbox_to_anchor=(1.25, 1))
# Add table with statistical description of TTC values.
table = pd.tools.plotting.table(ax[splt],
np.round(session['TTC'].describe(), 2),
loc='upper center',
colWidths=[0.2, 0.2, 0.2])
# Eliminate the border of the table.
for key, cell in table.get_celld().items():
cell.set_linewidth(0)
fig.add_subplot(ax[splt])
plt.savefig('figures/osg_ttc_experiments.pdf', dpi=600, bbox_inches='tight')
fig = plt.figure(figsize=(13,14))
title = 'XSEDE OSG Virtual Cluster'
subtitle = 'TTC'
defs = {'ttq': 'TTQ = Total Time Queuing pilots',
'ttr': 'TTR = Total Time Running pilots',
'ttc': 'TTC = Total Time Completing experiment'}
fig.suptitle('%s:\n%s.\n%s.' % (title,
subtitle,
defs['ttc']), fontsize=14)
gs = []
grid = gridspec.GridSpec(2, 2)
grid.update(wspace=0.4, top=0.85)
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[0]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[1]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[2]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 5, subplot_spec=grid[3]))
ttc_subplots = []
for exp in sessions['experiment'].sort_values().unique():
for nun in sessions['nunits'].sort_values().unique():
if not sessions[ (sessions['experiment'] == exp) &
(sessions['nunits'] == nun) ].empty:
ttc_subplots.append(sessions[ (sessions['experiment'] == exp) &
(sessions['nunits'] == nun) ].sort_values('TTC'))
colors = {'exp1': [tableau20[19]],
'exp2': [tableau20[7] ],
'exp3': [tableau20[13]],
'exp4': [tableau20[17]],
'exp5': [tableau20[15]]}
nun_exp = []
nun_exp.append(len(sessions[sessions['experiment'] == 'exp1']['nunits'].sort_values().unique()))
nun_exp.append(len(sessions[sessions['experiment'] == 'exp2']['nunits'].sort_values().unique()))
nun_exp.append(len(sessions[sessions['experiment'] == 'exp3']['nunits'].sort_values().unique()))
nun_exp.append(len(sessions[sessions['experiment'] == 'exp4']['nunits'].sort_values().unique()))
ax = []
i = 0
while(i < len(ttc_subplots)):
for gn in range(4):
for gc in range(nun_exp[gn]):
session = ttc_subplots.pop(0)
experiment = session['experiment'].unique()[0]
ntasks = int(session['nunits'].unique()[0])
repetitions = session.shape[0]
color = colors[experiment]
title = 'Exp. %s\n%s tasks\n%s reps.' % (experiment[3], ntasks, repetitions)
if i == 0:
ax.append(plt.Subplot(fig, gs[gn][0, gc]))
else:
ax.append(plt.Subplot(fig, gs[gn][0, gc], sharey=ax[0]))
session['TTC'].plot(kind='bar', ax=ax[i], color=color, title=title)
ax[i].spines["top"].set_visible(False)
ax[i].spines["right"].set_visible(False)
ax[i].get_xaxis().tick_bottom()
ax[i].get_yaxis().tick_left()
ax[i].set_xticklabels([])
ax[i].set_xlabel('Runs')
# Handle a bug that sets yticklabels to visible
# for the last subplot.
if i == 7 or i == 16:
plt.setp(ax[i].get_yticklabels(), visible=False)
else:
ax[i].set_ylabel('Time (s)')
# Handle legens.
if i == 7 or i == 3 or i == 11:
ax[i].legend(labels=['TTC'], bbox_to_anchor=(2.25, 1))
elif i == 16:
ax[i].legend(labels=['TTC'], bbox_to_anchor=(2.70, 1))
# else:
# ax[i].get_legend().set_visible(False)
fig.add_subplot(ax[i])
i += 1
plt.savefig('figures/osg_ttc_nunits.pdf', dpi=600, bbox_inches='tight')
ttc_stats = {}
for exp in sessions['experiment'].sort_values().unique():
for nun in sessions['nunits'].sort_values().unique():
tag = exp+'_'+str(int(nun))
ttc_stats[tag] = sessions[ (sessions['experiment'] == exp) &
(sessions['nunits'] == nun) ]['TTC'].describe()
ttc_compare = pd.DataFrame(ttc_stats)
sort_cols = ['exp1_8' , 'exp2_8' , 'exp1_16', 'exp2_16',
'exp1_32', 'exp2_32', 'exp1_64', 'exp2_64']
ttc_compare = ttc_compare.reindex_axis(sort_cols, axis=1)
ttc_compare
(ttc_compare.loc['std']/ttc_compare.loc['mean'])*100
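# Hedged helper (ours): the coefficient of variation (STD/mean, in percent) is
# computed repeatedly in this analysis, so we wrap it once. It expects the
# output of DataFrame.describe().
def coeff_variation(stats_df):
    return ((stats_df.loc['std'] / stats_df.loc['mean']) * 100).round(2)
coeff_variation(ttc_compare)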
last_sv = None
last_id = None
for s in sessions['session']:
sv = s.describe('state_values', etype=['pilot']).values()[0].values()[0]
if last_sv and last_sv != sv:
print "Different state models:\n%s = %s\n%s = %s" % (last_id, last_sv, sid, sv)
last_sv = sv
last_id = s._sid
pprint.pprint(last_sv)
# Model of pilot durations.
ttpdm = {'TT_PILOT_PMGR_SCHEDULING': ['NEW' , 'PMGR_LAUNCHING_PENDING'],
'TT_PILOT_PMGR_QUEUING' : ['PMGR_LAUNCHING_PENDING', 'PMGR_LAUNCHING'],
'TT_PILOT_LRMS_SUBMITTING': ['PMGR_LAUNCHING' , 'PMGR_ACTIVE_PENDING'],
'TT_PILOT_LRMS_QUEUING' : ['PMGR_ACTIVE_PENDING' , 'PMGR_ACTIVE'],
'TT_PILOT_LRMS_RUNNING' : ['PMGR_ACTIVE' , ['DONE',
'CANCELED',
'FAILED']]}
# Add total pilot durations to sessions' DF.
for sid in sessions.index:
s = sessions.ix[sid, 'session'].filter(etype='pilot', inplace=False)
for d in ttpdm.keys():
sessions.ix[sid, d] = s.duration(ttpdm[d])
# Print the relevant portion of the 'session' DataFrame.
display(sessions[['TT_PILOT_PMGR_SCHEDULING', 'TT_PILOT_PMGR_QUEUING',
'TT_PILOT_LRMS_SUBMITTING', 'TT_PILOT_LRMS_QUEUING',
'TT_PILOT_LRMS_RUNNING']].head(3))
display(sessions[['TT_PILOT_PMGR_SCHEDULING', 'TT_PILOT_PMGR_QUEUING',
'TT_PILOT_LRMS_SUBMITTING', 'TT_PILOT_LRMS_QUEUING',
'TT_PILOT_LRMS_RUNNING']].tail(3))
fig = plt.figure(figsize=(13,14))
title = 'XSEDE OSG Virtual Cluster'
subtitle = 'TTQ and TTC.'
defs = {'ttq': 'TTQ = Total Time Queuing pilots',
'ttr': 'TTR = Total Time Running pilots',
'ttc': 'TTC = Total Time Completing experiment'}
fig.suptitle('%s:\n%s.\n%s;\n%s.' % (title,
subtitle,
defs['ttq'],
defs['ttc']), fontsize=14)
gs = []
grid = gridspec.GridSpec(2, 2)
grid.update(wspace=0.4, top=0.85)
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[0]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[1]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[2]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 5, subplot_spec=grid[3]))
ttc_subplots = []
for exp in sessions['experiment'].sort_values().unique():
for nun in sessions['nunits'].sort_values().unique():
if not sessions[ (sessions['experiment'] == exp) &
(sessions['nunits'] == nun) ].empty:
ttc_subplots.append(sessions[ (sessions['experiment'] == exp) &
(sessions['nunits'] == nun) ].sort_values('TTC'))
colors = {'exp1': [tableau20[0] ,tableau20[19]],
'exp2': [tableau20[2] ,tableau20[7] ],
'exp3': [tableau20[8] ,tableau20[13]],
'exp4': [tableau20[4] ,tableau20[17]],
'exp5': [tableau20[10],tableau20[15]]}
nun_exp = []
nun_exp.append(len(sessions[sessions['experiment'] == 'exp1']['nunits'].sort_values().unique()))
nun_exp.append(len(sessions[sessions['experiment'] == 'exp2']['nunits'].sort_values().unique()))
nun_exp.append(len(sessions[sessions['experiment'] == 'exp3']['nunits'].sort_values().unique()))
nun_exp.append(len(sessions[sessions['experiment'] == 'exp4']['nunits'].sort_values().unique()))
ax = []
i = 0
while(i < len(ttc_subplots)):
for gn in range(4):
for gc in range(nun_exp[gn]):
session = ttc_subplots.pop(0)
experiment = session['experiment'].unique()[0]
ntasks = int(session['nunits'].unique()[0])
repetitions = session.shape[0]
color = colors[experiment]
title = 'Exp. %s\n%s tasks\n%s reps.' % (experiment[3], ntasks, repetitions)
if i == 0:
ax.append(plt.Subplot(fig, gs[gn][0, gc]))
else:
ax.append(plt.Subplot(fig, gs[gn][0, gc], sharey=ax[0]))
session[['TT_PILOT_LRMS_QUEUING',
'TTC']].plot(kind='bar', ax=ax[i], color=color, title=title, stacked=True)
ax[i].spines["top"].set_visible(False)
ax[i].spines["right"].set_visible(False)
ax[i].get_xaxis().tick_bottom()
ax[i].get_yaxis().tick_left()
ax[i].set_xticklabels([])
ax[i].set_xlabel('Runs')
# Handle a bug that sets yticklabels to visible
# for the last subplot.
if i == 7 or i == 16:
plt.setp(ax[i].get_yticklabels(), visible=False)
else:
ax[i].set_ylabel('Time (s)')
            # Handle legends.
if i == 7 or i == 3 or i == 11:
ax[i].legend(labels=['TTQ','TTC'], bbox_to_anchor=(2.25, 1))
elif i == 16:
ax[i].legend(labels=['TTQ','TTC'], bbox_to_anchor=(2.70, 1))
else:
ax[i].get_legend().set_visible(False)
fig.add_subplot(ax[i])
i += 1
plt.savefig('figures/osg_ttq_ttc_nunits.pdf', dpi=600, bbox_inches='tight')
ttc_stats = {}
for exp in sessions['experiment'].sort_values().unique():
for nun in sessions['nunits'].sort_values().unique():
tag = exp+'_'+str(int(nun))
ttc_stats[tag] = sessions[ (sessions['experiment'] == exp) &
(sessions['nunits'] == nun) ]['TT_PILOT_LRMS_QUEUING'].describe()
ttc_compare = pd.DataFrame(ttc_stats)
sort_cols = ['exp1_8' , 'exp2_8' , 'exp1_16', 'exp2_16',
'exp1_32', 'exp2_32', 'exp1_64', 'exp2_64']
ttc_compare = ttc_compare.reindex_axis(sort_cols, axis=1)
ttc_compare.round(2)
std_mean = (ttc_compare.loc['std']/ttc_compare.loc['mean'])*100
std_mean.round(2)
# Model of pilot durations.
pdm = {'PMGR_SCHEDULING': ['NEW' , 'PMGR_LAUNCHING_PENDING'],
'PMGR_QUEUING' : ['PMGR_LAUNCHING_PENDING', 'PMGR_LAUNCHING'],
'LRMS_SUBMITTING': ['PMGR_LAUNCHING' , 'PMGR_ACTIVE_PENDING'],
'LRMS_QUEUING' : ['PMGR_ACTIVE_PENDING' , 'PMGR_ACTIVE'],
'LRMS_RUNNING' : ['PMGR_ACTIVE' , ['DONE',
'CANCELED',
'FAILED']]}
# DataFrame structure for pilot durations.
pds = { 'pid': [],
'sid': [],
'experiment' : [],
'PMGR_SCHEDULING': [],
'PMGR_QUEUING' : [],
'LRMS_SUBMITTING': [],
'LRMS_QUEUING' : [],
'LRMS_RUNNING' : []}
# Calculate the duration for each state of each
# pilot of each run and Populate the DataFrame
# structure.
for sid in sessions.index:
s = sessions.ix[sid, 'session'].filter(etype='pilot', inplace=False)
for p in s.list('uid'):
sf = s.filter(uid=p, inplace=False)
pds['pid'].append(p)
pds['sid'].append(sid)
pds['experiment'].append(sessions.ix[sid, 'experiment'])
for d in pdm.keys():
if (not sf.timestamps(state=pdm[d][0]) or
not sf.timestamps(state=pdm[d][1])):
pds[d].append(None)
continue
pds[d].append(sf.duration(pdm[d]))
# Populate the DataFrame.
pilots = pd.DataFrame(pds)
display(pilots.head(3))
display(pilots.tail(3))
def parse_osg_hostid(hostid):
'''
Heuristic: eliminate node-specific information from hostID.
'''
domain = None
# Split domain name from IP.
host = hostid.split(':')
# Split domain name into words.
words = host[0].split('.')
# Get the words in the domain name that do not contain
# numbers. Most hostnames have no number but there are
# exceptions.
literals = [l for l in words if not any((number in set('0123456789')) for number in l)]
# Check for exceptions:
# a. every word of the domain name has a number
if len(literals) == 0:
# Some hostname use '-' instead of '.' as word separator.
# The parser would have returned a single word and the
# any of that word may have a number.
if '-' in host[0]:
words = host[0].split('-')
literals = [l for l in words if not any((number in set('0123456789')) for number in l)]
# FIXME: We do not check the size of literals.
domain = '.'.join(literals)
# Some hostnames may have only the name of the node. We
# have to keep the IP to decide later on whether two nodes
# are likely to belong to the same cluster.
elif 'n' in host[0] or 'nod' in host[0]:
domain = '.'.join(host)
# The hostname is identified by an alphanumeric string
else:
domain = '.'.join(host)
# Some hostnames DO have numbers in their name.
elif len(literals) == 1:
domain = '.'.join(words[1:])
# Some hostname are just simple to parse.
else:
domain = '.'.join(literals)
return domain
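# Hedged sanity check of the heuristic on made-up hostIDs -- the node names
# below are illustrative, not actual OSG resources.
for h in ['node123.its.osg-htc.example:192.168.0.1',
          'c-12-34.cluster.example.edu',
          'wn-3-17.example.org:10.0.0.5']:
    print '%-45s -> %s' % (h, parse_osg_hostid(h))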
for pix in pilots.index:
sid = pilots.ix[pix,'sid']
pid = pilots.ix[pix,'pid']
pls = sessions.ix[sid, 'session'].filter(uid=pid, inplace=False).get(etype=['pilot'])
if len(pls) != 1:
print "Error: session filter on uid returned multiple pilots"
break
hostid = pls[0].cfg['hostid']
if hostid:
domain = parse_osg_hostid(hostid)
else:
domain = np.nan
pilots.ix[pix,'hostID'] = hostid
pilots.ix[pix,'parsed_hostID'] = domain
fig, ax = fig_setup()
title='XSEDE OSG Virtual Cluster\nDensity of Pilot Tq'
tq_exp1 = pilots[pilots['experiment'].str.contains('exp1')]['LRMS_QUEUING'].dropna().reset_index(drop=True)
tq_exp2 = pilots[pilots['experiment'].str.contains('exp2')]['LRMS_QUEUING'].dropna().reset_index(drop=True)
tq_exp3 = pilots[pilots['experiment'].str.contains('exp3')]['LRMS_QUEUING'].dropna().reset_index(drop=True)
tq_exp4 = pilots[pilots['experiment'].str.contains('exp4')]['LRMS_QUEUING'].dropna().reset_index(drop=True)
plots = pd.DataFrame({'exp1': tq_exp1, 'exp2': tq_exp2, 'exp3': tq_exp3, 'exp4': tq_exp4})
#plots.plot.hist(ax=ax, color=[tableau20[19],tableau20[7],tableau20[13],tableau20[17]], title=title)
plots.plot.density(ax=ax, color=[tableau20[0],tableau20[2],tableau20[8],tableau20[4]], title=title)
ax.set_xlabel('Time (s)')
ax.legend(labels=['Tq Experiment 1','Tq Experiment 2','Tq Experiment 3','Tq Experiment 4'])
plt.savefig('figures/osg_tq_frequency.pdf', dpi=600, bbox_inches='tight')
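# Hedged first pass at quantifying the apparent bimodality (our addition):
# Sarle's bimodality coefficient b = (g^2 + 1) / (k + 3(n-1)^2/((n-2)(n-3))),
# with g the sample skewness and k the excess kurtosis. Values above ~5/9
# are consistent with bimodality; indicative only, it cannot replace more
# measurements.
def bimodality_coefficient(x):
    n = float(len(x))
    g = sps.skew(x)
    k = sps.kurtosis(x)  # Fisher (excess) kurtosis by default
    return (g**2 + 1) / (k + 3*(n-1)**2 / ((n-2)*(n-3)))
for name, tq in [('exp1', tq_exp1), ('exp2', tq_exp2),
                 ('exp3', tq_exp3), ('exp4', tq_exp4)]:
    print '%s: b = %.3f (threshold 5/9 = %.3f)' % (name, bimodality_coefficient(tq), 5/9.0)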
fig = plt.figure(figsize=(13,14))
title = 'XSEDE OSG Virtual Cluster'
subtitle = 'TTQ, TTR and TTC'
defs = {'ttq': 'TTQ = Total Time Queuing pilots',
'ttr': 'TTR = Total Time Running pilots',
'ttc': 'TTC = Total Time Completing experiment'}
fig.suptitle('%s:\n%s.\n%s;\n%s;\n%s.' % (title,
subtitle,
defs['ttq'],
defs['ttr'],
defs['ttc']), fontsize=14)
gs = []
grid = gridspec.GridSpec(2, 2)
grid.update(wspace=0.4, top=0.85)
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[0]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[1]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[2]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 5, subplot_spec=grid[3]))
ttq_subplots = []
for exp in sessions['experiment'].sort_values().unique():
for nun in sessions['nunits'].sort_values().unique():
if not sessions[ (sessions['experiment'] == exp) &
(sessions['nunits'] == nun) ].empty:
ttq_subplots.append(sessions[ (sessions['experiment'] == exp) &
(sessions['nunits'] == nun) ].sort_values('TTC'))
colors = {'exp1': [tableau20[0] ,tableau20[18],tableau20[19]],
'exp2': [tableau20[2] ,tableau20[6] ,tableau20[7] ],
'exp3': [tableau20[8] ,tableau20[12],tableau20[13]],
'exp4': [tableau20[4] ,tableau20[16],tableau20[17]],
'exp5': [tableau20[10],tableau20[14],tableau20[15]]}
nun_exp = []
nun_exp.append(len(sessions[sessions['experiment'] == 'exp1']['nunits'].sort_values().unique()))
nun_exp.append(len(sessions[sessions['experiment'] == 'exp2']['nunits'].sort_values().unique()))
nun_exp.append(len(sessions[sessions['experiment'] == 'exp3']['nunits'].sort_values().unique()))
nun_exp.append(len(sessions[sessions['experiment'] == 'exp4']['nunits'].sort_values().unique()))
ax = []
i = 0
while(i < len(ttq_subplots)):
for gn in range(4):
for gc in range(nun_exp[gn]):
session = ttq_subplots.pop(0)
experiment = session['experiment'].unique()[0]
ntasks = int(session['nunits'].unique()[0])
repetitions = session.shape[0]
color = colors[experiment]
title = 'Exp. %s\n%s tasks\n%s rep.' % (experiment[3], ntasks, repetitions)
if i == 0:
ax.append(plt.Subplot(fig, gs[gn][0, gc]))
else:
ax.append(plt.Subplot(fig, gs[gn][0, gc], sharey=ax[0]))
session[['TT_PILOT_LRMS_QUEUING',
'TT_PILOT_LRMS_RUNNING',
'TTC']].plot(kind='bar', ax=ax[i], color=color, title=title, stacked=True)
ax[i].spines["top"].set_visible(False)
ax[i].spines["right"].set_visible(False)
ax[i].get_xaxis().tick_bottom()
ax[i].get_yaxis().tick_left()
ax[i].set_xticklabels([])
ax[i].set_xlabel('Runs')
# Handle a bug that sets yticklabels to visible
# for the last subplot.
if i == 7 or i == 16:
plt.setp(ax[i].get_yticklabels(), visible=False)
else:
ax[i].set_ylabel('Time (s)')
            # Handle legends.
if i == 7 or i == 3 or i == 11:
ax[i].legend(labels=['TTQ','TTR','TTC'], bbox_to_anchor=(2.25, 1))
elif i == 16:
ax[i].legend(labels=['TTQ','TTR','TTC'], bbox_to_anchor=(2.70, 1))
else:
ax[i].get_legend().set_visible(False)
fig.add_subplot(ax[i])
i += 1
plt.savefig('figures/osg_ttq_ttr_ttc_nunits.pdf', dpi=600, bbox_inches='tight')
# Temporary: workaround for bug ticket \#15. Calculates
# the number of active pilots by looking into the
# length of the list returned by timestamp on the
# PMGR_ACTIVE state.
for sid in sessions.index:
sessions.ix[sid, 'npilot_active'] = len(sessions.ix[sid, 'session'].filter(etype='pilot', inplace=False).timestamps(state='PMGR_ACTIVE'))
display(sessions[['npilot_active']].head(3))
display(sessions[['npilot_active']].tail(3))
fig = plt.figure(figsize=(13,14))
title = 'XSEDE OSG Virtual Cluster'
subtitle = 'TTQ, TTR and TTC with Number of Active Pilots'
defs = {'ttq': 'TTQ = Total Time Queuing pilots',
'ttr': 'TTR = Total Time Running pilots',
'ttc': 'TTC = Total Time Completing experiment'}
fig.suptitle('%s:\n%s.\n%s;\n%s;\n%s.' % (title,
subtitle,
defs['ttq'],
defs['ttr'],
defs['ttc']), fontsize=14)
gs = []
grid = gridspec.GridSpec(2, 2)
grid.update(wspace=0.4, top=0.85)
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[0]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[1]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[2]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 5, subplot_spec=grid[3]))
ttq_subplots = []
for exp in sessions['experiment'].sort_values().unique():
for nun in sessions['nunits'].sort_values().unique():
if not sessions[ (sessions['experiment'] == exp) &
(sessions['nunits'] == nun) ].empty:
ttq_subplots.append(sessions[ (sessions['experiment'] == exp) &
(sessions['nunits'] == nun) ].sort_values('TTC'))
colors = {'exp1': [tableau20[0] ,tableau20[18],tableau20[19]],
'exp2': [tableau20[2] ,tableau20[6] ,tableau20[7] ],
'exp3': [tableau20[8] ,tableau20[12],tableau20[13]],
'exp4': [tableau20[4] ,tableau20[16],tableau20[17]],
'exp5': [tableau20[10],tableau20[14],tableau20[15]]}
nun_exp = []
nun_exp.append(len(sessions[sessions['experiment'] == 'exp1']['nunits'].sort_values().unique()))
nun_exp.append(len(sessions[sessions['experiment'] == 'exp2']['nunits'].sort_values().unique()))
nun_exp.append(len(sessions[sessions['experiment'] == 'exp3']['nunits'].sort_values().unique()))
nun_exp.append(len(sessions[sessions['experiment'] == 'exp4']['nunits'].sort_values().unique()))
ax = []
i = 0
while(i < len(ttq_subplots)):
for gn in range(4):
for gc in range(nun_exp[gn]):
session = ttq_subplots.pop(0)
experiment = session['experiment'].unique()[0]
ntasks = int(session['nunits'].unique()[0])
repetitions = session.shape[0]
color = colors[experiment]
title = 'Exp. %s\n%s tasks\n%s rep.' % (experiment[3], ntasks, repetitions)
if i == 0:
ax.append(plt.Subplot(fig, gs[gn][0, gc]))
else:
ax.append(plt.Subplot(fig, gs[gn][0, gc], sharey=ax[0]))
session[['TT_PILOT_LRMS_QUEUING',
'TT_PILOT_LRMS_RUNNING',
'TTC']].plot(kind='bar', ax=ax[i], color=color, title=title, stacked=True)
ax[i].spines["top"].set_visible(False)
ax[i].spines["right"].set_visible(False)
ax[i].get_xaxis().tick_bottom()
ax[i].get_yaxis().tick_left()
ax[i].set_xticklabels([])
ax[i].set_xlabel('Runs')
# Handle a bug that sets yticklabels to visible
# for the last subplot.
if i == 7 or i == 16:
plt.setp(ax[i].get_yticklabels(), visible=False)
else:
ax[i].set_ylabel('Time (s)')
            # Handle legends.
if i == 7 or i == 3 or i == 11:
ax[i].legend(labels=['TTQ','TTR','TTC'], bbox_to_anchor=(2.25, 1))
elif i == 16:
ax[i].legend(labels=['TTQ','TTR','TTC'], bbox_to_anchor=(2.70, 1))
else:
ax[i].get_legend().set_visible(False)
# Add labels with number of pilots per session.
rects = ax[i].patches
labels = [int(l) for l in session['npilot_active']]
for rect, label in zip(rects[-repetitions:], labels):
height = rect.get_height()
ax[i].text(rect.get_x() + rect.get_width()/2,
(height*2), label, ha='center',
va='bottom')
fig.add_subplot(ax[i])
i += 1
plt.savefig('figures/osg_ttq_ttr_ttc_npactive_nunits.pdf', dpi=600, bbox_inches='tight')
last_sv = None
last_id = None
for s in sessions['session']:
sv = s.describe('state_values', etype=['unit']).values()[0].values()[0]
if last_sv and last_sv != sv:
print "Different state models:\n%s = %s\n%s = %s" % (last_id, last_sv, sid, sv)
last_sv = sv
last_id = s._sid
pprint.pprint(last_sv)
# Model of unit durations.
udm = {'TT_UNIT_UMGR_SCHEDULING' : ['NEW' , 'UMGR_SCHEDULING_PENDING'],
'TT_UNIT_UMGR_BINDING' : ['UMGR_SCHEDULING_PENDING' , 'UMGR_SCHEDULING'],
'TT_IF_UMGR_SCHEDULING' : ['UMGR_SCHEDULING' , 'UMGR_STAGING_INPUT_PENDING'],
'TT_IF_UMGR_QUEING' : ['UMGR_STAGING_INPUT_PENDING' , 'UMGR_STAGING_INPUT'],
'TT_IF_AGENT_SCHEDULING' : ['UMGR_STAGING_INPUT' , 'AGENT_STAGING_INPUT_PENDING'],
'TT_IF_AGENT_QUEUING' : ['AGENT_STAGING_INPUT_PENDING' , 'AGENT_STAGING_INPUT'],
'TT_IF_AGENT_TRANSFERRING' : ['AGENT_STAGING_INPUT' , 'AGENT_SCHEDULING_PENDING'],
'TT_UNIT_AGENT_QUEUING' : ['AGENT_SCHEDULING_PENDING' , 'AGENT_SCHEDULING'],
'TT_UNIT_AGENT_SCHEDULING' : ['AGENT_SCHEDULING' , 'AGENT_EXECUTING_PENDING'],
'TT_UNIT_AGENT_QUEUING_EXEC': ['AGENT_EXECUTING_PENDING' , 'AGENT_EXECUTING'],
'TT_UNIT_AGENT_EXECUTING' : ['AGENT_EXECUTING' , 'AGENT_STAGING_OUTPUT_PENDING'],
'TT_OF_AGENT_QUEUING' : ['AGENT_STAGING_OUTPUT_PENDING', 'AGENT_STAGING_OUTPUT'],
'TT_OF_UMGR_SCHEDULING' : ['AGENT_STAGING_OUTPUT' , 'UMGR_STAGING_OUTPUT_PENDING'],
'TT_OF_UMGR_QUEUING' : ['UMGR_STAGING_OUTPUT_PENDING' , 'UMGR_STAGING_OUTPUT'],
'TT_OF_UMGR_TRANSFERRING' : ['UMGR_STAGING_OUTPUT' , 'DONE']}
# Calculate total unit durations for each session.
for sid in sessions.index:
s = sessions.ix[sid, 'session'].filter(etype='unit', inplace=False)
for d in udm.keys():
sessions.ix[sid, d] = s.duration(udm[d])
# Print the new columns of the session DF with total unit durations.
display(sessions[['TT_UNIT_UMGR_SCHEDULING' , 'TT_UNIT_UMGR_BINDING' , 'TT_IF_UMGR_SCHEDULING' ,
'TT_IF_UMGR_QUEING' , 'TT_IF_AGENT_SCHEDULING' , 'TT_IF_AGENT_QUEUING' ,
'TT_IF_AGENT_TRANSFERRING' , 'TT_UNIT_AGENT_QUEUING' , 'TT_UNIT_AGENT_SCHEDULING',
'TT_UNIT_AGENT_QUEUING_EXEC', 'TT_UNIT_AGENT_EXECUTING', 'TT_OF_AGENT_QUEUING' ,
'TT_OF_UMGR_SCHEDULING' , 'TT_OF_UMGR_QUEUING' , 'TT_OF_UMGR_TRANSFERRING']].head(3))
display(sessions[['TT_UNIT_UMGR_SCHEDULING' , 'TT_UNIT_UMGR_BINDING' , 'TT_IF_UMGR_SCHEDULING' ,
'TT_IF_UMGR_QUEING' , 'TT_IF_AGENT_SCHEDULING' , 'TT_IF_AGENT_QUEUING' ,
'TT_IF_AGENT_TRANSFERRING' , 'TT_UNIT_AGENT_QUEUING' , 'TT_UNIT_AGENT_SCHEDULING',
'TT_UNIT_AGENT_QUEUING_EXEC', 'TT_UNIT_AGENT_EXECUTING', 'TT_OF_AGENT_QUEUING' ,
'TT_OF_UMGR_SCHEDULING' , 'TT_OF_UMGR_QUEUING' , 'TT_OF_UMGR_TRANSFERRING']].tail(3))
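# Hedged normalization (ours): dividing the total time spent in a state by the
# number of units gives the average per-CU time in that state, making runs of
# different workload sizes directly comparable.
sessions['AVG_UNIT_AGENT_EXECUTING'] = sessions['TT_UNIT_AGENT_EXECUTING'] / sessions['nunits']
display(sessions[['nunits', 'TT_UNIT_AGENT_EXECUTING', 'AVG_UNIT_AGENT_EXECUTING']].head(3))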
# Add number of unique resources per session.
for sid in sessions.index:
sessions.ix[sid, 'n_unique_host'] = len(pilots[pilots['sid'] == sid]['parsed_hostID'].unique())
fig = plt.figure(figsize=(13,14))
title = 'XSEDE OSG Virtual Cluster'
subtitle = 'TTQ, TTR, TTX and TTC with Number of Active Pilots (black) and Number of Unique Resources (red)'
fig.suptitle('%s:\n%s.' % (title, subtitle), fontsize=16)
defs = {'ttq': 'TTQ = Total Time Queuing pilots',
'ttr': 'TTR = Total Time Running pilots',
'ttx': 'TTR = Total Time Executing compute units',
'ttc': 'TTC = Total Time Completing experiment'}
defslist = '%s;\n%s;\n%s;\n%s.' % (defs['ttq'], defs['ttr'], defs['ttx'], defs['ttc'])
plt.figtext(.38,.89, defslist, fontsize=14, ha='left')
gs = []
grid = gridspec.GridSpec(2, 2)
grid.update(wspace=0.4, hspace=0.4, top=0.825)
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[0]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[1]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[2]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 5, subplot_spec=grid[3]))
ttq_subplots = []
for exp in sessions['experiment'].sort_values().unique():
for nun in sessions['nunits'].sort_values().unique():
if not sessions[ (sessions['experiment'] == exp) &
(sessions['nunits'] == nun) ].empty:
ttq_subplots.append(sessions[ (sessions['experiment'] == exp) &
(sessions['nunits'] == nun) ].sort_values('TTC'))
colors = {'exp1': [tableau20[0] ,tableau20[18],tableau20[1] ,tableau20[19]],
'exp2': [tableau20[2] ,tableau20[6] ,tableau20[3] ,tableau20[7] ],
'exp3': [tableau20[8] ,tableau20[12],tableau20[9] ,tableau20[13]],
'exp4': [tableau20[4] ,tableau20[16],tableau20[5] ,tableau20[17]],
'exp5': [tableau20[10],tableau20[14],tableau20[11],tableau20[15]]}
nun_exp = []
nun_exp.append(len(sessions[sessions['experiment'] == 'exp1']['nunits'].sort_values().unique()))
nun_exp.append(len(sessions[sessions['experiment'] == 'exp2']['nunits'].sort_values().unique()))
nun_exp.append(len(sessions[sessions['experiment'] == 'exp3']['nunits'].sort_values().unique()))
nun_exp.append(len(sessions[sessions['experiment'] == 'exp4']['nunits'].sort_values().unique()))
ax = []
i = 0
while(i < len(ttq_subplots)):
for gn in range(4):
for gc in range(nun_exp[gn]):
session = ttq_subplots.pop(0)
experiment = session['experiment'].unique()[0]
ntasks = int(session['nunits'].unique()[0])
npilots = int(session[session['experiment'] == experiment]['npilot_active'][0])
repetitions = session.shape[0]
color = colors[experiment]
title = 'Exp. %s\n%s tasks\n%s pilots\n%s rep.' % (experiment[3], ntasks, npilots, repetitions)
if i == 0:
ax.append(plt.Subplot(fig, gs[gn][0, gc]))
else:
ax.append(plt.Subplot(fig, gs[gn][0, gc], sharey=ax[0]))
session[['TT_PILOT_LRMS_QUEUING',
'TT_PILOT_LRMS_RUNNING',
'TT_UNIT_AGENT_EXECUTING',
'TTC']].plot(kind='bar', ax=ax[i], color=color, title=title, stacked=True)
ax[i].spines["top"].set_visible(False)
ax[i].spines["right"].set_visible(False)
ax[i].get_xaxis().tick_bottom()
ax[i].get_yaxis().tick_left()
ax[i].set_xticklabels([])
ax[i].set_xlabel('Runs')
# Handle a bug that sets yticklabels to visible
# for the last subplot.
if i == 7 or i == 16:
plt.setp(ax[i].get_yticklabels(), visible=False)
else:
ax[i].set_ylabel('Time (s)')
            # Handle legends.
if i == 7 or i == 3 or i == 11:
ax[i].legend(labels=['TTQ','TTR','TTX','TTC'], bbox_to_anchor=(2.25, 1))
elif i == 16:
ax[i].legend(labels=['TTQ','TTR','TTX','TTC'], bbox_to_anchor=(2.70, 1))
else:
ax[i].get_legend().set_visible(False)
# Add labels with number of pilots per session.
rects = ax[i].patches
labels = [int(l) for l in session['npilot_active']]
for rect, label in zip(rects[-repetitions:], labels):
height = rect.get_height()
ax[i].text(rect.get_x() + rect.get_width()/2,
(height*3)+1500, label, ha='center',
va='bottom')
# Add labels with number of unique resources per session.
rects = ax[i].patches
labels = [int(l) for l in session['n_unique_host']]
for rect, label in zip(rects[-repetitions:], labels):
height = rect.get_height()
ax[i].text(rect.get_x() + rect.get_width()/2,
height*3, label, ha='center',
va='bottom', color='red')
fig.add_subplot(ax[i])
i += 1
plt.savefig('figures/osg_ttq_ttr_ttx_ttc_npactive_nrunique_nunits.pdf', dpi=600, bbox_inches='tight')
sessions[['TT_PILOT_LRMS_RUNNING', 'TT_UNIT_AGENT_EXECUTING']].describe()
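# Hedged derivation (ours): the share of pilot runtime not spent executing CUs
# is a first measure of pilot management overhead. Data staging is excluded,
# as in the rest of this analysis.
overhead = sessions['TT_PILOT_LRMS_RUNNING'] - sessions['TT_UNIT_AGENT_EXECUTING']
(overhead / sessions['TT_PILOT_LRMS_RUNNING']).describe()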
fig = plt.figure(figsize=(13,7))
fig.suptitle('TTX with Number of Active Pilots - XSEDE OSG Virtual Cluster', fontsize=14)
gs = []
grid = gridspec.GridSpec(1, 2)
grid.update(wspace=0.4, top=0.85)
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[0]))
gs.append(gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=grid[1]))
ttq_subplots = []
for exp in sessions['experiment'].sort_values().unique():
for nun in sessions['nunits'].sort_values().unique():
ttq_subplots.append(sessions[ (sessions['experiment'] == exp) &
(sessions['nunits'] == nun) ].sort_values('TTC'))
colors = {'exp1': [tableau20[18]],
'exp2': [tableau20[10]]}
ax = []
i = 0
while(i<8):
for gn in range(2):
for gc in range(4):
session = ttq_subplots.pop(0)
experiment = session['experiment'].unique()[0]
ntasks = int(session['nunits'].unique()[0])
repetitions = session.shape[0]
color = colors[experiment]
title = 'Experiment %s\n%s tasks\n%s repetitions.' % (experiment[3], ntasks, repetitions)
if i == 0:
ax.append(plt.Subplot(fig, gs[gn][0, gc]))
else:
ax.append(plt.Subplot(fig, gs[gn][0, gc], sharey=ax[0]))
session[['TT_UNIT_AGENT_EXECUTING']].plot(kind='bar', ax=ax[i], color=color, title=title, stacked=True)
ax[i].spines["top"].set_visible(False)
ax[i].spines["right"].set_visible(False)
ax[i].get_xaxis().tick_bottom()
ax[i].get_yaxis().tick_left()
ax[i].set_xticklabels([])
ax[i].set_xlabel('Sessions')
# Handle a bug that sets yticklabels to visible
# for the last subplot.
if i == 7:
plt.setp(ax[i].get_yticklabels(), visible=False)
else:
ax[i].set_ylabel('Time (s)')
            # Handle legends.
if i == 7 or i == 3:
ax[i].legend(labels=['TTX'], bbox_to_anchor=(2.25, 1))
else:
ax[i].get_legend().set_visible(False)
# Add labels with number of pilots per session.
rects = ax[i].patches
labels = [int(l) for l in session['npilot_active']]
for rect, label in zip(rects[-repetitions:], labels):
height = rect.get_height()
ax[i].text(rect.get_x() + rect.get_width()/2,
height+0.5, label, ha='center',
va='bottom')
fig.add_subplot(ax[i])
i += 1
plt.savefig('figures/osg_ttx_npactive_nunits.pdf', dpi=600, bbox_inches='tight')
ttx_stats = {}
for exp in sessions['experiment'].sort_values().unique():
for nun in sessions['nunits'].sort_values().unique():
tag = exp+'_'+str(int(nun))
ttx_stats[tag] = sessions[ (sessions['experiment'] == exp) &
(sessions['nunits'] == nun) ]['TT_UNIT_AGENT_EXECUTING'].describe()
ttx_compare = pd.DataFrame(ttx_stats)
sort_cols_runs = ['exp1_8' , 'exp2_8' , 'exp1_16', 'exp2_16',
'exp1_32', 'exp2_32', 'exp1_64', 'exp2_64']
sort_cols_exp = ['exp1_8' , 'exp1_16', 'exp1_32', 'exp1_64',
'exp2_8', 'exp2_16', 'exp2_32', 'exp2_64']
ttx_compare_runs = ttx_compare.reindex_axis(sort_cols_runs, axis=1)
ttx_compare_exp = ttx_compare.reindex_axis(sort_cols_exp, axis=1)
ttx_compare_exp.round(2)
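# std/mean is the coefficient of variation (in percent): a scale-free
# measure of dispersion, useful to compare variability across experiments.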
std_mean = (ttx_compare_exp.loc['std']/ttx_compare_exp.loc['mean'])*100
std_mean.round(2)
from collections import OrderedDict
cTTXs = OrderedDict()
ncu = 64
ss = sessions[sessions['nunits'] == ncu].sort_values(['experiment','TTC'])['session']
for s in ss:
cTTXs[s._sid] = s.filter(etype='unit').concurrency(state=['AGENT_EXECUTING','AGENT_STAGING_OUTPUT_PENDING'],
sampling=1)
for sid, cTTX in cTTXs.iteritems():
title = 'Degree of Concurrent Execution of %s CUs - XSEDE OSG Virtual Cluster\nSession %s' % (ncu, sid)
x = [x[0] for x in cTTX]
y = [y[1] for y in cTTX]
color = tableau20[2]
if 'ming' in sid:
color = tableau20[0]
fig, ax = fig_setup()
fig.suptitle(title, fontsize=14)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Number of CU')
display(ax.plot(x, y, marker='.', linestyle='', color=color))
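# A compact summary of the concurrency plots above (a sketch): the peak
# number of concurrently executing CUs observed in each session.
for sid, cTTX in cTTXs.iteritems():
    peak = max(c[1] for c in cTTX)
    print '%s: peak concurrency = %s' % (sid, peak)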
# Model of unit durations.
udm = {'UNIT_UMGR_SCHEDULING' : ['NEW' , 'UMGR_SCHEDULING_PENDING'],
'UNIT_UMGR_BINDING' : ['UMGR_SCHEDULING_PENDING' , 'UMGR_SCHEDULING'],
'IF_UMGR_SCHEDULING' : ['UMGR_SCHEDULING' , 'UMGR_STAGING_INPUT_PENDING'],
'IF_UMGR_QUEING' : ['UMGR_STAGING_INPUT_PENDING' , 'UMGR_STAGING_INPUT'],
#'IF_AGENT_SCHEDULING' : ['UMGR_STAGING_INPUT' , 'AGENT_STAGING_INPUT_PENDING'],
'IF_AGENT_QUEUING' : ['AGENT_STAGING_INPUT_PENDING' , 'AGENT_STAGING_INPUT'],
'IF_AGENT_TRANSFERRING' : ['AGENT_STAGING_INPUT' , 'AGENT_SCHEDULING_PENDING'],
'UNIT_AGENT_QUEUING' : ['AGENT_SCHEDULING_PENDING' , 'AGENT_SCHEDULING'],
'UNIT_AGENT_SCHEDULING' : ['AGENT_SCHEDULING' , 'AGENT_EXECUTING_PENDING'],
'UNIT_AGENT_QUEUING_EXEC': ['AGENT_EXECUTING_PENDING' , 'AGENT_EXECUTING'],
'UNIT_AGENT_EXECUTING' : ['AGENT_EXECUTING' , 'AGENT_STAGING_OUTPUT_PENDING'],
#'OF_AGENT_QUEUING' : ['AGENT_STAGING_OUTPUT_PENDING', 'AGENT_STAGING_OUTPUT'],
#'OF_UMGR_SCHEDULING' : ['AGENT_STAGING_OUTPUT' , 'UMGR_STAGING_OUTPUT_PENDING'],
'OF_UMGR_QUEUING' : ['UMGR_STAGING_OUTPUT_PENDING' , 'UMGR_STAGING_OUTPUT'],
'OF_UMGR_TRANSFERRING' : ['UMGR_STAGING_OUTPUT' , 'DONE']}
# DataFrame structure for unit durations.
uds = { 'pid': [],
'sid': [],
'experiment' : [],
'UNIT_UMGR_SCHEDULING' : [],
'UNIT_UMGR_BINDING' : [],
'IF_UMGR_SCHEDULING' : [],
'IF_UMGR_QUEING' : [],
'IF_AGENT_SCHEDULING' : [],
'IF_AGENT_QUEUING' : [],
'IF_AGENT_TRANSFERRING' : [],
'UNIT_AGENT_QUEUING' : [],
'UNIT_AGENT_SCHEDULING' : [],
'UNIT_AGENT_QUEUING_EXEC': [],
'UNIT_AGENT_EXECUTING' : [],
'OF_AGENT_QUEUING' : [],
'OF_UMGR_SCHEDULING' : [],
'OF_UMGR_QUEUING' : [],
'OF_UMGR_TRANSFERRING' : []}
# Calculate the duration for each state of each
# unit of each run and populate the DataFrame
# structure.
for sid in sessions[['session', 'experiment']].index:
s = sessions.ix[sid, 'session'].filter(etype='unit', inplace=False)
for u in s.list('uid'):
sf = s.filter(uid=u, inplace=False)
uds['pid'].append(u)
uds['sid'].append(sid)
uds['experiment'].append(sessions.ix[sid, 'experiment'])
for d in udm.keys():
if (not sf.timestamps(state=udm[d][0]) or
not sf.timestamps(state=udm[d][1])):
                uds[d].append(None)
print udm[d]
continue
uds[d].append(sf.duration(udm[d]))
# Populate the DataFrame. Some lists stay empty (their states are commented
# out in udm), so wrap each list in a Series to align them on index.
units = pd.DataFrame(dict([(k,pd.Series(v)) for k,v in uds.iteritems()]))
display(units.head(3))
display(units.tail(3))
def measures_of_center(durations):
m = {}
m['mu'] = np.mean(durations) # Mean value of the data
# standard error of the mean. Quantifies how
# precisely we know the true mean of the
# population. It takes into account both the
# value of the SD and the sample size. SEM
# gets smaller as your samples get larger:
# precision of the mean gets higher with the
# sample size.
m['sem'] = sps.sem(durations)
# Are there extremes in our dataset? Compare
# to the mean.
m['median'] = np.median(durations)
    # What value occurs most often?
    m['mode'] = sps.mstats.mode(durations)
    # Standard deviation: stored here because the plotting code below
    # reads m['std'] for the figure titles and the normal-fit comparisons.
    m['std'] = np.std(durations)
    return m
Txs = units['UNIT_AGENT_EXECUTING']
Txs_exp1 = units[ units['experiment'] == 'exp1']['UNIT_AGENT_EXECUTING']
Txs_exp2 = units[ units['experiment'] == 'exp2']['UNIT_AGENT_EXECUTING']
Txs = sorted(Txs)
Txs_exp1 = sorted(Txs_exp1)
Txs_exp2 = sorted(Txs_exp2)
Tx_measures = measures_of_center(Txs)
Tx_measures_exp1 = measures_of_center(Txs_exp1)
Tx_measures_exp2 = measures_of_center(Txs_exp2)
print 'Tx'
pprint.pprint(Tx_measures)
print '\nTx_exp1'
pprint.pprint(Tx_measures_exp1)
print '\nTx_exp2'
pprint.pprint(Tx_measures_exp2)
def measures_of_spread(durations):
m = {}
m['range'] = max(durations)-min(durations)
m['min'], m['q1'], m['q2'], m['q3'], m['max'] = np.percentile(durations, [0,25,50,75,100])
m['irq'] = m['q3'] - m['q1']
m['var'] = np.var(durations)
m['std'] = np.std(durations)
m['mad'] = sm.robust.scale.mad(durations)
return m
print "Tx"
pprint.pprint(measures_of_spread(Txs))
print "\nTx exp1"
pprint.pprint(measures_of_spread(Txs_exp1))
print "\nTx exp2"
pprint.pprint(measures_of_spread(Txs_exp2))
plots = [Txs, Txs_exp1, Txs_exp2]
fig, ax = fig_setup()
fig.suptitle('Distribution of Tx for Experiment 1 and 2', fontsize=14)
ax.set_ylabel('Time (s)')
bp = ax.boxplot(plots, labels=['Tx', 'Tx exp1', 'Tx exp2'])#, showmeans=True, showcaps=True)
bp['boxes'][0].set( color=tableau20[8] )
bp['boxes'][1].set( color=tableau20[0] )
bp['boxes'][2].set( color=tableau20[2] )
plt.savefig('figures/osg_cu_spread_box.pdf', dpi=600, bbox_inches='tight')
# - Mann-Whitney-Wilcoxon (MWW) RankSum test: determine
# whether two distributions are significantly
# different or not. Unlike the t-test, the RankSum
# test does not assume that the data are normally
# distributed. How do we interpret the difference?
x = np.linspace(min(Txs),max(Txs),len(Txs))
Txs_pdf = mlab.normpdf(x, Tx_measures['mu'], Tx_measures['std'])
z_stat, p_val = sps.ranksums(Txs, Txs_pdf)
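# Report the RankSum result (a sketch, assuming a significance level
# alpha = 0.05): a small p-value suggests that the empirical Tx sample
# differs significantly from the fitted normal values.
print 'RankSum z-statistic: %.3f, p-value: %.3e' % (z_stat, p_val)
if p_val < 0.05:
    print 'Tx differs significantly from the fitted normal (p < 0.05).'
else:
    print 'No significant difference detected (p >= 0.05).'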
Tx_measures['skew'] = sps.skew(Txs, bias=True)
Tx_measures['kurt'] = sps.kurtosis(Txs)
u_skew_test = sps.skewtest(Txs)
u_kurt_test = sps.kurtosistest(Txs)
print Tx_measures['skew']
print Tx_measures['kurt']
print u_skew_test
print u_kurt_test
metric = 'T_x'
description = 'Histogram of $%s$' % metric
task = 'Gromacs emulation'
repetition = '$%s$ repetitions' % len(Txs)
resource = 'XSEDE OSG Virtual Cluster'
stats = '$\mu$=%.3f,\ \sigma=%.3f,\ SE_\mu=%.3f$' % (Tx_measures['mu'], Tx_measures['std'], Tx_measures['sem'])
title = '%s.\n%s; %s;\n%s.' % (description, repetition, resource, stats)
ax = fig_setup()
n, bins, patches = ax.hist(Txs, bins='fd',
normed=1,
histtype='stepfilled',
label="$T_x$",
linewidth=0.75,
edgecolor=tableau20[8],
color=tableau20[9])
# ax.xaxis.set_major_locator(ticker.MultipleLocator(50))
# ax.set_xlim(150, 700)
ax.set_ylim(0.0, 0.005)
plt.ylabel('$P(Tx)$')
plt.xlabel('$T_x$ (s)')
plt.title(title)#, y=1.05)
plt.legend(loc='upper right')
plt.savefig('figures/osg_cu_spread_hist.pdf', dpi=600, bbox_inches='tight')
Tx_measures_exp1['skew'] = sps.skew(Txs_exp1, bias=True)
Tx_measures_exp1['kurt'] = sps.kurtosis(Txs_exp1)
u_skew_test = sps.skewtest(Txs_exp1)
u_kurt_test = sps.kurtosistest(Txs_exp1)
print Tx_measures_exp1['skew']
print Tx_measures_exp1['kurt']
print u_skew_test
print u_kurt_test
metric = 'T_x_exp1'
description = 'Histogram of $%s$' % metric
task = 'Gromacs emulation'
repetition = '$%s$ repetitions' % len(Txs_exp1)
resource = 'XSEDE OSG Virtual Cluster'
stats = '$\mu$=%.3f,\ \sigma=%.3f,\ SE_\mu=%.3f$' % (Tx_measures_exp1['mu'], Tx_measures_exp1['std'], Tx_measures_exp1['sem'])
title = '%s.\n%s; %s;\n%s.' % (description, repetition, resource, stats)
ax = fig_setup()
n, bins, patches = ax.hist(Txs_exp1, bins='fd',
normed=1,
histtype='stepfilled',
label="$T_x$",
linewidth=0.75,
edgecolor=tableau20[0],
color=tableau20[1])
# ax.xaxis.set_major_locator(ticker.MultipleLocator(50))
# ax.set_xlim(150, 700)
ax.set_ylim(0.0, 0.005)
plt.ylabel('$P(Tx)$')
plt.xlabel('$T_x$ (s)')
plt.title(title)#, y=1.05)
plt.legend(loc='upper right')
plt.savefig('figures/osg_exp1_cu_spread_hist.pdf', dpi=600, bbox_inches='tight')
Tx_measures_exp2['skew'] = sps.skew(Txs_exp2, bias=True)
Tx_measures_exp2['kurt'] = sps.kurtosis(Txs_exp2)
u_skew_test = sps.skewtest(Txs_exp2)
u_kurt_test = sps.kurtosistest(Txs_exp2)
print Tx_measures_exp2['skew']
print Tx_measures_exp2['kurt']
print u_skew_test
print u_kurt_test
metric = 'T_x_exp2'
description = 'Histogram of $%s$' % metric
task = 'Gromacs emulation'
repetition = '$%s$ repetitions' % len(Txs_exp2)
resource = 'XSEDE OSG Virtual Cluster'
stats = '$\mu$=%.3f,\ \sigma=%.3f,\ SE_\mu=%.3f$' % (Tx_measures_exp2['mu'], Tx_measures_exp2['std'], Tx_measures_exp2['sem'])
title = '%s.\n%s; %s;\n%s.' % (description, repetition, resource, stats)
ax = fig_setup()
n, bins, patches = ax.hist(Txs_exp2, bins='fd',
normed=1,
histtype='stepfilled',
label="$T_x$",
linewidth=0.75,
edgecolor=tableau20[2],
color=tableau20[3])
# ax.xaxis.set_major_locator(ticker.MultipleLocator(50))
# ax.set_xlim(150, 700)
ax.set_ylim(0.0, 0.005)
plt.ylabel('$P(Tx)$')
plt.xlabel('$T_x$ (s)')
plt.title(title)#, y=1.05)
plt.legend(loc='upper right')
plt.savefig('figures/osg_exp2_cu_spread_hist.pdf', dpi=600, bbox_inches='tight')
# - Fit to the normal distribution: fit the empirical
# distribution to the normal for comparison purposes.
(f_mu, f_sigma) = sps.norm.fit(Txs)
(f_mu_exp1, f_sigma_exp1) = sps.norm.fit(Txs_exp1)
(f_mu_exp2, f_sigma_exp2) = sps.norm.fit(Txs_exp2)
# sample_pdf = np.linspace(min(Txs),max(Txs), len(Txs))
sample_pdf = np.linspace(0,max(Txs), len(Txs))
sample_pdf_exp1 = np.linspace(0,max(Txs_exp1), len(Txs_exp1))
sample_pdf_exp2 = np.linspace(0,max(Txs_exp2), len(Txs_exp2))
metric = 'T_x'
description = 'Histogram of $%s$ compared to its fitted normal distribution' % metric
task = 'Gromacs emulation'
repetition = '$%s$ repetitions' % len(Txs)
resource = 'XSEDE OSG Virtual Cluster'
stats = '$\mu$=%.3f,\ \sigma=%.3f,\ SE_\mu=%.3f$' % (Tx_measures['mu'], Tx_measures['std'], Tx_measures['sem'])
title = '%s.\n%s; %s;\n%s.' % (description, repetition, resource, stats)
ax = fig_setup()
n, bins, p = ax.hist(Txs, bins='fd',
normed=True,
histtype='stepfilled',
label="$T_x$",
linewidth=0.75,
edgecolor=tableau20[8],
color=tableau20[9])
pdf = mlab.normpdf(sample_pdf, f_mu, f_sigma)
print min(pdf)
print max(pdf)
ax.plot(sample_pdf,
pdf,
label="$\phi$",
color=tableau20[6])
# ax.fill_between(bins,
# sample_pdf,
# color=tableau20[1],
# alpha=0.25)
# ax.xaxis.set_major_locator(ticker.MultipleLocator(50))
ax.set_xlim(min(sample_pdf), max(sample_pdf))
ax.set_ylim(0.0, 0.005)
plt.ylabel('$P(Tx)$')
plt.xlabel('$T_x$ (s)')
plt.title(title)#, y=1.05)
plt.legend(loc='upper right')
plt.savefig('osg_cu_spread_pdf.pdf', dpi=600, bbox_inches='tight')
metric = 'T_x_exp1'
description = 'Histogram of $%s$ compared to its fitted normal distribution' % metric
task = 'Gromacs emulation'
repetition = '$%s$ repetitions' % len(Txs_exp1)
resource = 'XSEDE OSG Virtual Cluster'
stats = '$\mu$=%.3f,\ \sigma=%.3f,\ SE_\mu=%.3f$' % (Tx_measures_exp1['mu'],
Tx_measures_exp1['std'],
Tx_measures_exp1['sem'])
title = '%s.\n%s; %s;\n%s.' % (description, repetition, resource, stats)
ax = fig_setup()
n, bins, p = ax.hist(Txs_exp1, bins='fd',
normed=True,
histtype='stepfilled',
label="$T_x$",
linewidth=0.75,
edgecolor=tableau20[0],
color=tableau20[1])
pdf_exp1 = mlab.normpdf(sample_pdf_exp1, f_mu_exp1, f_sigma_exp1)
print min(pdf_exp1)
print max(pdf_exp1)
ax.plot(sample_pdf_exp1,
pdf_exp1,
label="$\phi$",
color=tableau20[6])
# ax.fill_between(bins,
# sample_pdf,
# color=tableau20[1],
# alpha=0.25)
# ax.xaxis.set_major_locator(ticker.MultipleLocator(50))
ax.set_xlim(min(sample_pdf_exp1), max(sample_pdf_exp1))
ax.set_ylim(0.0, 0.005)
plt.ylabel('$P(Tx)$')
plt.xlabel('$T_x$ (s)')
plt.title(title)#, y=1.05)
plt.legend(loc='upper right')
plt.savefig('osg_exp1_cu_spread_pdf.pdf', dpi=600, bbox_inches='tight')
metric = 'T_x Experiment 2'
description = 'Histogram of $%s$ compared to its fitted normal distribution' % metric
task = 'Gromacs emulation'
repetition = '$%s$ repetitions' % len(Txs_exp2)
resource = 'XSEDE OSG Virtual Cluster'
stats = '$\mu$=%.3f,\ \sigma=%.3f,\ SE_\mu=%.3f$' % (Tx_measures_exp2['mu'],
Tx_measures_exp2['std'],
Tx_measures_exp2['sem'])
title = '%s.\n%s; %s;\n%s.' % (description, repetition, resource, stats)
ax = fig_setup()
n, bins, p = ax.hist(Txs_exp2, bins='fd',
normed=True,
histtype='stepfilled',
label="$T_x$",
linewidth=0.75,
edgecolor=tableau20[2],
color=tableau20[3])
pdf_exp2 = mlab.normpdf(sample_pdf_exp2, f_mu_exp2, f_sigma_exp2)
print min(pdf_exp2)
print max(pdf_exp2)
ax.plot(sample_pdf_exp2,
pdf_exp2,
label="$\phi$",
color=tableau20[6])
# ax.fill_between(bins,
# sample_pdf,
# color=tableau20[1],
# alpha=0.25)
# ax.xaxis.set_major_locator(ticker.MultipleLocator(50))
ax.set_xlim(min(sample_pdf_exp2), max(sample_pdf_exp2))
ax.set_ylim(0.0, 0.005)
plt.ylabel('$P(Tx)$')
plt.xlabel('$T_x$ (s)')
plt.title(title)#, y=1.05)
plt.legend(loc='upper right')
plt.savefig('osg_exp2_cu_spread_pdf.pdf', dpi=600, bbox_inches='tight')
# Samples drawn from the fitted normal distribution
sample_pdf = np.random.normal(loc=f_mu, scale=f_sigma, size=len(Txs))
metric = 'T_x'
description = 'Cumulative distribution of $%s$ compared to its fitted normal distribution' % metric
task = 'Gromacs emulation'
repetition = '$%s$ repetitions' % len(Txs)
resource = 'XSEDE OSG Virtual Cluster'
stats = '$\mu$=%.3f,\ \sigma=%.3f,\ SE_\mu=%.3f$' % (Tx_measures['mu'], Tx_measures['std'], Tx_measures['sem'])
title = '%s.\n%s; %s;\n%s.' % (description, repetition, resource, stats)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.spines["top"].set_visible(False)
# ax.spines["right"].set_visible(False)
# ax.get_xaxis().tick_bottom()
# ax.get_yaxis().tick_left()
ax = fig_setup()
n, bins, p = ax.hist(Txs,
bins='fd',
normed=True,
cumulative=True,
histtype='stepfilled',
label="$T_x$",
linewidth=0.75,
edgecolor=tableau20[8],
color=tableau20[9],
alpha=0.75)
ax.hist(sample_pdf,
bins='fd',
normed=True,
cumulative=True,
histtype='stepfilled',
label="$cmd$",
edgecolor=tableau20[6],
color=tableau20[7],
alpha=0.25)
plt.ylabel('$P(Tx)$')
plt.xlabel('$T_x$ (s)')
plt.title(title, y=1.05)
plt.legend(loc='upper left')
plt.savefig('osg_cu_cumulative_hist.pdf', dpi=600, bbox_inches='tight')
# Samples drawn from the fitted normal distribution
sample_pdf_exp1 = np.random.normal(loc=f_mu_exp1, scale=f_sigma_exp1, size=len(Txs_exp1))
metric = 'T_x Experiment 1'
description = 'Cumulative distribution of $%s$ compared to its fitted normal distribution' % metric
task = 'Gromacs emulation'
repetition = '$%s$ repetitions' % len(Txs_exp1)
resource = 'XSEDE OSG Virtual Cluster'
stats = '$\mu$=%.3f,\ \sigma=%.3f,\ SE_\mu=%.3f$' % (Tx_measures_exp1['mu'],
Tx_measures_exp1['std'],
Tx_measures_exp1['sem'])
title = '%s.\n%s; %s;\n%s.' % (description, repetition, resource, stats)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.spines["top"].set_visible(False)
# ax.spines["right"].set_visible(False)
# ax.get_xaxis().tick_bottom()
# ax.get_yaxis().tick_left()
ax = fig_setup()
n, bins, p = ax.hist(Txs_exp1,
bins='fd',
normed=True,
cumulative=True,
histtype='stepfilled',
label="$T_x$",
linewidth=0.75,
edgecolor=tableau20[0],
color=tableau20[1],
alpha=0.75)
ax.hist(sample_pdf_exp1,
bins='fd',
normed=True,
cumulative=True,
histtype='stepfilled',
label="$cmd$",
edgecolor=tableau20[6],
color=tableau20[7],
alpha=0.25)
plt.ylabel('$P(Tx)$')
plt.xlabel('$T_x$ (s)')
plt.title(title, y=1.05)
plt.legend(loc='upper left')
plt.savefig('osg_exp1_cu_cumulative_hist.pdf', dpi=600, bbox_inches='tight')
# Samples drawn from the fitted normal distribution
sample_pdf_exp2 = np.random.normal(loc=f_mu_exp2, scale=f_sigma_exp2, size=len(Txs_exp2))
metric = 'T_x Experiment 2'
description = 'Cumulative distribution of $%s$ compared to its fitted normal distribution' % metric
task = 'Gromacs emulation'
repetition = '$%s$ repetitions' % len(Txs_exp2)
resource = 'XSEDE OSG Virtual Cluster'
stats = '$\mu$=%.3f,\ \sigma=%.3f,\ SE_\mu=%.3f$' % (Tx_measures_exp2['mu'],
Tx_measures_exp2['std'],
Tx_measures_exp2['sem'])
title = '%s.\n%s; %s;\n%s.' % (description, repetition, resource, stats)
ax = fig_setup()
n, bins, p = ax.hist(Txs_exp2,
bins='fd',
normed=True,
cumulative=True,
histtype='stepfilled',
label="$T_x$",
linewidth=0.75,
edgecolor=tableau20[2],
color=tableau20[3],
alpha=0.75)
ax.hist(sample_pdf_exp2,
bins='fd',
normed=True,
cumulative=True,
histtype='stepfilled',
label="$cmd$",
edgecolor=tableau20[6],
color=tableau20[7],
alpha=0.25)
plt.ylabel('$P(Tx)$')
plt.xlabel('$T_x$ (s)')
plt.title(title, y=1.05)
plt.legend(loc='upper left')
plt.savefig('osg_exp2_cu_cumulative_hist.pdf', dpi=600, bbox_inches='tight')
Txs_np = np.array(Txs)
# Cumulative samples
Txs_sum = np.cumsum(np.ones(Txs_np.shape))/len(Txs)
# Values for analytical cdf
sample_cdf = np.linspace(0,max(Txs), len(Txs))
metric = 'T_x'
description = 'Cumulative distribution of $%s$ compared to its fitted normal distribution' % metric
task = 'Gromacs emulation'
repetition = '$%s$ repetitions' % len(Txs)
resource = 'XSEDE OSG Virtual Cluster'
stats = '$\mu$=%.3f,\ \sigma=%.3f,\ SE_\mu=%.3f$' % (Tx_measures['mu'], Tx_measures['std'], Tx_measures['sem'])
title = '%s.\n%s; %s;\n%s.' % (description, repetition, resource, stats)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.spines["top"].set_visible(False)
# ax.spines["right"].set_visible(False)
# ax.get_xaxis().tick_bottom()
# ax.get_yaxis().tick_left()
ax = fig_setup()
ax.plot(sample_cdf,
sps.norm.cdf(sample_cdf, f_mu, f_sigma),
label="cdf",
color=tableau20[6])
ax.step(Txs,
Txs_sum,
label="$T_x$",
where='post',
color=tableau20[8])
plt.ylabel('$P(Tx)$')
plt.xlabel('$T_x$ (s)')
plt.title(title, y=1.05)
plt.legend(loc='upper left')
plt.savefig('osg_cu_cumulative_plot.pdf', dpi=600, bbox_inches='tight')
Txs_np_exp1 = np.array(Txs_exp1)
# Cumulative samples
Txs_sum_exp1 = np.cumsum(np.ones(Txs_np_exp1.shape))/len(Txs_exp1)
# Values for analytical cdf
sample_cdf_exp1 = np.linspace(0,max(Txs_exp1), len(Txs_exp1))
metric = 'T_x Experiment 1'
description = 'Cumulative distribution of $%s$ compared to its fitted normal distribution' % metric
task = 'Gromacs emulation'
repetition = '$%s$ repetitions' % len(Txs_exp1)
resource = 'XSEDE OSG Virtual Cluster'
stats = '$\mu$=%.3f,\ \sigma=%.3f,\ SE_\mu=%.3f$' % (Tx_measures_exp1['mu'],
Tx_measures_exp1['std'],
Tx_measures_exp1['sem'])
title = '%s.\n%s; %s;\n%s.' % (description, repetition, resource, stats)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.spines["top"].set_visible(False)
# ax.spines["right"].set_visible(False)
# ax.get_xaxis().tick_bottom()
# ax.get_yaxis().tick_left()
ax = fig_setup()
ax.plot(sample_cdf_exp1,
sps.norm.cdf(sample_cdf_exp1, f_mu_exp1, f_sigma_exp1),
label="cdf",
color=tableau20[6])
ax.step(Txs_exp1,
Txs_sum_exp1,
label="$T_x$",
where='post',
color=tableau20[0])
plt.ylabel('$P(Tx)$')
plt.xlabel('$T_x$ (s)')
plt.title(title, y=1.05)
plt.legend(loc='upper left')
plt.savefig('osg_exp1_cu_cumulative_plot.pdf', dpi=600, bbox_inches='tight')
Txs_np_exp2 = np.array(Txs_exp2)
# Cumulative samples
Txs_sum_exp2 = np.cumsum(np.ones(Txs_np_exp2.shape))/len(Txs_exp2)
# Values for analytical cdf
sample_cdf_exp2 = np.linspace(0,max(Txs_exp2), len(Txs_exp2))
metric = 'T_x Experiment 2'
description = 'Cumulative distribution of $%s$ compared to its fitted normal distribution' % metric
task = 'Gromacs emulation'
repetition = '$%s$ repetitions' % len(Txs_exp2)
resource = 'XSEDE OSG Virtual Cluster'
stats = '$\mu$=%.3f,\ \sigma=%.3f,\ SE_\mu=%.3f$' % (Tx_measures_exp2['mu'],
Tx_measures_exp2['std'],
Tx_measures_exp2['sem'])
title = '%s.\n%s; %s;\n%s.' % (description, repetition, resource, stats)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.spines["top"].set_visible(False)
# ax.spines["right"].set_visible(False)
# ax.get_xaxis().tick_bottom()
# ax.get_yaxis().tick_left()
ax = fig_setup()
ax.plot(sample_cdf_exp2,
sps.norm.cdf(sample_cdf_exp2, f_mu_exp2, f_sigma_exp2),
label="cdf",
color=tableau20[6])
ax.step(Txs_exp2,
Txs_sum_exp2,
label="$T_x$",
where='post',
color=tableau20[2])
plt.ylabel('$P(Tx)$')
plt.xlabel('$T_x$ (s)')
plt.title(title, y=1.05)
plt.legend(loc='upper left')
plt.savefig('osg_exp2_cu_cumulative_plot.pdf', dpi=600, bbox_inches='tight')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Haiku function transformations
Step2: Transforming stateful functions
Step3: Modules
Step4: Nested and built-in modules
Step5: Stochastic modules
Step10: Combining JAX Function transformations and Haiku
|
<ASSISTANT_TASK:>
Python Code:
%%capture
!pip install git+https://github.com/deepmind/dm-haiku
import haiku as hk
%%capture
!pip install git+git://github.com/deepmind/optax.git
import optax
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
# Here is a function that takes in data x, and meta-data output_size,
# but creates its mutable parameters internally.
# The parameters define an affine mapping, f1(x) = b + W*x
def f1(x, output_size):
j, k = x.shape[-1], output_size
w_init = hk.initializers.TruncatedNormal(1.0 / np.sqrt(j))
w = hk.get_parameter("w", shape=[j, k], dtype=x.dtype, init=w_init)
b = hk.get_parameter("b", shape=[k], dtype=x.dtype, init=jnp.ones)
return jnp.dot(x, w) + b
# transform will convert f1 to a function that explicitly uses parameters, which we call f2.
# (We explain the rng part later.)
f2 = hk.without_apply_rng(hk.transform(f1))
# f2 is a struct with two functions, init and apply
print(f2)
# The init function creates an initial random set of parameters
# by calling f1 on some data x (the values don't matter, just the shape)
# and using the RNG.
# The params are stored in a haiku FlatMap (like a FrozenDict)
output_size = 2
dummy_x = jnp.array([[1.0, 2.0, 3.0]])
rng_key = jax.random.PRNGKey(42)
# params = f2.init(rng=rng_key, x=dummy_x, output_size = output_size)
params = f2.init(rng_key, dummy_x, output_size)
print(params)
p = params["~"]
print(p["b"])
# params are frozen: attempting an in-place update raises an error
params["~"]["b"] = jnp.array([2.0, 2.0])
# The apply function takes a param FlatMap and injects it into the original f1 function
sample_x = jnp.array([[1.0, 2.0, 3.0]])
output_1 = f2.apply(params=params, x=sample_x, output_size=output_size)
print(output_1)
def stateful_f(x):
counter = hk.get_state("counter", shape=[], dtype=jnp.int32, init=jnp.ones)
multiplier = hk.get_parameter(
"multiplier",
shape=[
1,
],
dtype=x.dtype,
init=jnp.ones,
)
hk.set_state("counter", counter + 1)
output = x + multiplier * counter
return output
stateful_forward = hk.without_apply_rng(hk.transform_with_state(stateful_f))
sample_x = jnp.array(
[
[
5.0,
]
]
)
params, state = stateful_forward.init(x=sample_x, rng=rng_key)
print(f"Initial params:\n{params}\nInitial state:\n{state}")
print("##########")
for i in range(3):
output, state = stateful_forward.apply(params, state, x=sample_x)
print(f"After {i+1} iterations:\nOutput: {output}\nState: {state}")
print("##########")
class MyLinear1(hk.Module):
def __init__(self, output_size, name=None):
super().__init__(name=name)
self.output_size = output_size
def __call__(self, x):
j, k = x.shape[-1], self.output_size
w_init = hk.initializers.TruncatedNormal(1.0 / np.sqrt(j))
w = hk.get_parameter("w", shape=[j, k], dtype=x.dtype, init=w_init)
b = hk.get_parameter("b", shape=[k], dtype=x.dtype, init=jnp.ones)
return jnp.dot(x, w) + b
def _forward_fn_linear1(x):
module = MyLinear1(output_size=2)
return module(x)
forward_linear1 = hk.without_apply_rng(hk.transform(_forward_fn_linear1))
dummy_x = jnp.array([[1.0, 2.0, 3.0]])
rng_key = jax.random.PRNGKey(42)
params = forward_linear1.init(rng=rng_key, x=dummy_x)
print(params)
sample_x = jnp.array([[1.0, 2.0, 3.0]])
output_1 = forward_linear1.apply(params=params, x=sample_x)
print(output_1)
class MyModuleCustom(hk.Module):
def __init__(self, output_size=2, name="custom_linear"):
super().__init__(name=name)
self._internal_linear_1 = hk.nets.MLP(output_sizes=[2, 3], name="hk_internal_linear")
self._internal_linear_2 = MyLinear1(output_size=output_size, name="old_linear")
def __call__(self, x):
return self._internal_linear_2(self._internal_linear_1(x))
def _custom_forward_fn(x):
module = MyModuleCustom()
return module(x)
custom_forward_without_rng = hk.without_apply_rng(hk.transform(_custom_forward_fn))
params = custom_forward_without_rng.init(rng=rng_key, x=sample_x)
params
class HkRandom2(hk.Module):
def __init__(self, rate=0.5):
super().__init__()
self.rate = rate
def __call__(self, x):
key1 = hk.next_rng_key()
return jax.random.bernoulli(key1, 1.0 - self.rate, shape=x.shape)
class HkRandomNest(hk.Module):
def __init__(self, rate=0.5):
super().__init__()
self.rate = rate
self._another_random_module = HkRandom2()
def __call__(self, x):
key2 = hk.next_rng_key()
p1 = self._another_random_module(x)
p2 = jax.random.bernoulli(key2, 1.0 - self.rate, shape=x.shape)
print(f"Bernoullis are : {p1, p2}")
# Note that the modules that are stochastic cannot be wrapped with hk.without_apply_rng()
forward = hk.transform(lambda x: HkRandomNest()(x))
x = jnp.array(1.0)
params = forward.init(rng_key, x=x)
# The 2 Bernoullis can be different, since they use key1 and key2.
# But across the 5 iterations the answers should be the same,
# since they are all produced by passing in the same rng_key to apply.
for i in range(5):
print(f"\n Iteration {i+1}")
prediction = forward.apply(params, x=x, rng=rng_key)
from typing import Generator, Mapping, Tuple
from absl import app
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import tensorflow_datasets as tfds
Batch = Mapping[str, np.ndarray]
# Data
def load_dataset(
split: str,
*,
is_training: bool,
batch_size: int,
) -> Generator[Batch, None, None]:
    """Loads the dataset as a generator of batches."""
ds = tfds.load("mnist:3.*.*", split=split).cache().repeat()
if is_training:
ds = ds.shuffle(10 * batch_size, seed=0)
ds = ds.batch(batch_size)
return iter(tfds.as_numpy(ds))
# Make datasets.
train = load_dataset("train", is_training=True, batch_size=1000)
train_eval = load_dataset("train", is_training=False, batch_size=10000)
test_eval = load_dataset("test", is_training=False, batch_size=10000)
# Model
NCLASSES = 10
def net_fn(batch: Batch) -> jnp.ndarray:
    """Standard LeNet-300-100 MLP network."""
x = batch["image"].astype(jnp.float32) / 255.0
mlp = hk.Sequential(
[
hk.Flatten(),
hk.Linear(300),
jax.nn.relu,
hk.Linear(100),
jax.nn.relu,
hk.Linear(NCLASSES),
]
)
return mlp(x)
net = hk.without_apply_rng(hk.transform(net_fn))
L2_REGULARIZER = 1e-4
# Metrics
# Training loss (cross-entropy).
def loss(params: hk.Params, batch: Batch) -> jnp.ndarray:
    """Compute the loss of the network, including L2."""
logits = net.apply(params, batch)
labels = jax.nn.one_hot(batch["label"], NCLASSES)
l2_loss = 0.5 * sum(jnp.sum(jnp.square(p)) for p in jax.tree_leaves(params))
softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))
softmax_xent /= labels.shape[0]
return softmax_xent + L2_REGULARIZER * l2_loss
# Evaluation metric (classification accuracy).
@jax.jit
def accuracy(params: hk.Params, batch: Batch) -> jnp.ndarray:
predictions = net.apply(params, batch)
return jnp.mean(jnp.argmax(predictions, axis=-1) == batch["label"])
@jax.jit
def update(
params: hk.Params,
opt_state: optax.OptState,
batch: Batch,
) -> Tuple[hk.Params, optax.OptState]:
    """Learning rule (stochastic gradient descent)."""
grads = jax.grad(loss)(params, batch)
updates, opt_state = opt.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state
# We maintain avg_params, the exponential moving average of the "live" params.
# avg_params is used only for evaluation (cf. https://doi.org/10.1137/0330046)
@jax.jit
def ema_update(params, avg_params):
return optax.incremental_update(params, avg_params, step_size=0.001)
# Optimizer
LR = 1e-3
opt = optax.adam(LR)
# Initialize network and optimiser; note we draw an input to get shapes.
params = avg_params = net.init(jax.random.PRNGKey(42), next(train))
opt_state = opt.init(params)
# Train/eval loop.
nsteps = 500
print_every = 100
def callback(step, avg_params, train_eval, test_eval):
if step % print_every == 0:
# Periodically evaluate classification accuracy on train & test sets.
train_accuracy = accuracy(avg_params, next(train_eval))
test_accuracy = accuracy(avg_params, next(test_eval))
train_accuracy, test_accuracy = jax.device_get((train_accuracy, test_accuracy))
print(f"[Step {step}] Train / Test accuracy: " f"{train_accuracy:.3f} / {test_accuracy:.3f}.")
for step in range(nsteps + 1):
params, opt_state = update(params, opt_state, next(train))
avg_params = ema_update(params, avg_params)
callback(step, avg_params, train_eval, test_eval)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ![TFX](images/tfx.jpeg)
Step2: 1. Data Analysis
Step3: 1.2 Infer Schema
Step4: 1.3 Configure Schema
Step5: 1.4 Validate evaluation data
Step6: 1.5 Freeze the schema
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import tensorflow_data_validation as tfdv
print('TF version: {}'.format(tf.__version__))
print('TFDV version: {}'.format(tfdv.__version__))
PROJECT = 'cloud-training-demos' # Replace with your PROJECT
BUCKET = 'cloud-training-demos-ml' # Replace with your BUCKET
REGION = 'us-central1' # Choose an available region for Cloud MLE
import os
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
## ensure we predict locally with our current Python environment
gcloud config set ml_engine/local_python `which python`
DATA_DIR='gs://cloud-samples-data/ml-engine/census/data'
import os
TRAIN_DATA_FILE = os.path.join(DATA_DIR, 'adult.data.csv')
EVAL_DATA_FILE = os.path.join(DATA_DIR, 'adult.test.csv')
!gsutil ls -l $TRAIN_DATA_FILE
!gsutil ls -l $EVAL_DATA_FILE
HEADER = ['age', 'workclass', 'fnlwgt', 'education', 'education_num',
'marital_status', 'occupation', 'relationship', 'race', 'gender',
'capital_gain', 'capital_loss', 'hours_per_week',
'native_country', 'income_bracket']
TARGET_FEATURE_NAME = 'income_bracket'
TARGET_LABELS = [' <=50K', ' >50K']
WEIGHT_COLUMN_NAME = 'fnlwgt'
# This is a convenience function for CSV. We can write a Beam pipeline for other formats.
# https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/generate_statistics_from_csv
train_stats = tfdv.generate_statistics_from_csv(
data_location=TRAIN_DATA_FILE,
column_names=HEADER,
stats_options=tfdv.StatsOptions(
weight_feature=WEIGHT_COLUMN_NAME,
sample_rate=1.0
)
)
tfdv.visualize_statistics(train_stats)
schema = tfdv.infer_schema(statistics=train_stats)
tfdv.display_schema(schema=schema)
print(tfdv.get_feature(schema, 'age'))
# Relax the minimum fraction of values that must come from the domain for feature occupation.
occupation = tfdv.get_feature(schema, 'occupation')
occupation.distribution_constraints.min_domain_mass = 0.9
# Add a new value to the domain of feature native_country, assuming that we may
# start receiving it. Of course, we won't be able to make great predictions for
# this value, because this country is not part of our training data.
native_country_domain = tfdv.get_domain(schema, 'native_country')
native_country_domain.value.append('Egypt')
# All features are by default in both TRAINING and SERVING environments.
schema.default_environment.append('TRAINING')
schema.default_environment.append('EVALUATION')
schema.default_environment.append('SERVING')
# Specify that the class feature is not in SERVING environment.
tfdv.get_feature(schema, TARGET_FEATURE_NAME).not_in_environment.append('SERVING')
tfdv.display_schema(schema=schema)
eval_stats = tfdv.generate_statistics_from_csv(
EVAL_DATA_FILE,
column_names=HEADER,
stats_options=tfdv.StatsOptions(
weight_feature=WEIGHT_COLUMN_NAME)
)
eval_anomalies = tfdv.validate_statistics(eval_stats, schema, environment='EVALUATION')
tfdv.display_anomalies(eval_anomalies)
RAW_SCHEMA_LOCATION = 'raw_schema.pbtxt'
from tensorflow.python.lib.io import file_io
from google.protobuf import text_format
tfdv.write_schema_text(schema, RAW_SCHEMA_LOCATION)
!cat {RAW_SCHEMA_LOCATION}
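# Round-trip check (a sketch): load the frozen schema back and display it.
loaded_schema = tfdv.load_schema_text(RAW_SCHEMA_LOCATION)
tfdv.display_schema(schema=loaded_schema)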
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Logistic Regression
Step2: 2.2. Classifiers based on the logistic model.
Step3: 3.3. Nonlinear classifiers.
Step4: 3. Inference
Step5: Now, we select two classes and two attributes.
Step6: 3.2.2. Data normalization
Step7: Now, we can normalize training and test data. Observe in the code that the same transformation should be applied to training and test data. This is the reason why normalization with the test data is done using the means and the variances computed with the training set.
Step8: The following figure generates a plot of the normalized training data.
Step9: In order to apply the gradient descent rule, we need to define two methods
Step10: We can test the behavior of the gradient descent method by fitting a logistic regression model with ${\bf z}({\bf x}) = (1, {\bf x}^\intercal)^\intercal$.
Step11: 3.2.3. Free parameters
Step12: 3.2.5. Polynomial Logistic Regression
Step13: Visualizing the posterior map we can see that the polynomial transformation produces nonlinear decision boundaries.
Step14: 4. Regularization and MAP estimation.
Step15: 6. Logistic regression in Scikit Learn.
|
<ASSISTANT_TASK:>
Python Code:
# To visualize plots in the notebook
%matplotlib inline
# Imported libraries
import csv
import random
import matplotlib
import matplotlib.pyplot as plt
import pylab
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import PolynomialFeatures
from sklearn import linear_model
# Define the logistic function
def logistic(x):
p = 1.0 / (1 + np.exp(-x))
return p
# Plot the logistic function
t = np.arange(-6, 6, 0.1)
z = logistic(t)
plt.plot(t, z)
plt.xlabel('$t$', fontsize=14)
plt.ylabel('$\phi(t)$', fontsize=14)
plt.title('The logistic function')
plt.grid()
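# The logistic function has the convenient derivative
# phi'(t) = phi(t) * (1 - phi(t)), which is what makes the gradient of the
# log-likelihood so simple. A quick numerical check of this identity:
dz = np.gradient(z, 0.1)   # numerical derivative (the grid step is 0.1)
plt.figure()
plt.plot(t, dz, label='numerical derivative')
plt.plot(t, z * (1 - z), '--', label='$\phi(t)(1-\phi(t))$')
plt.xlabel('$t$', fontsize=14)
plt.legend(loc='best')
plt.grid()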
# Weight vector:
w = [1, 4, 8] # Try different weights
# Create a rectangular grid.
x_min = -1
x_max = 1
dx = x_max - x_min
h = float(dx) / 200
xgrid = np.arange(x_min, x_max, h)
xx0, xx1 = np.meshgrid(xgrid, xgrid)
# Compute the logistic map for the given weights
Z = logistic(w[0] + w[1]*xx0 + w[2]*xx1)
# Plot the logistic map
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(xx0, xx1, Z, cmap=plt.cm.copper)
plt.xlabel('$x_0$')
plt.ylabel('$x_1$')
ax.set_zlabel('P(1|x,w)')
# SOLUTION TO THE EXERCISE
# Weight vector:
w = [1, 10, 10, -20, 5, 1] # Try different weights
# Create a rectangular grid.
x_min = -1
x_max = 1
dx = x_max - x_min
h = float(dx) / 200
xgrid = np.arange(x_min, x_max, h)
xx0, xx1 = np.meshgrid(xgrid, xgrid)
# Compute the logistic map for the given weights
Z = logistic(w[0] + w[1]*xx0 + w[2]*xx1 + w[3]*np.multiply(xx0,xx0) +
             w[4]*np.multiply(xx0,xx1) + w[5]*np.multiply(xx1,xx1))
# Plot the logistic map
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(xx0, xx1, Z, cmap=plt.cm.copper)
plt.xlabel('$x_0$')
plt.ylabel('$x_1$')
ax.set_zlabel('P(1|x,w)')
# Adapted from a notebook by Jason Brownlee
def loadDataset(filename, split):
xTrain = []
cTrain = []
xTest = []
cTest = []
with open(filename, 'rb') as csvfile:
lines = csv.reader(csvfile)
dataset = list(lines)
for i in range(len(dataset)-1):
for y in range(4):
dataset[i][y] = float(dataset[i][y])
item = dataset[i]
if random.random() < split:
xTrain.append(item[0:4])
cTrain.append(item[4])
else:
xTest.append(item[0:4])
cTest.append(item[4])
return xTrain, cTrain, xTest, cTest
xTrain_all, cTrain_all, xTest_all, cTest_all = loadDataset('iris.data', 0.66)
nTrain_all = len(xTrain_all)
nTest_all = len(xTest_all)
print 'Train: ' + str(nTrain_all)
print 'Test: ' + str(nTest_all)
# Select attributes
i = 0 # Try 0,1,2,3
j = 1 # Try 0,1,2,3 with j!=i
# Select two classes
c0 = 'Iris-versicolor'
c1 = 'Iris-virginica'
# Select two coordinates
ind = [i, j]
# Take training test
X_tr = np.array([[xTrain_all[n][k] for k in ind] for n in range(nTrain_all)
                 if cTrain_all[n]==c0 or cTrain_all[n]==c1])
C_tr = [cTrain_all[n] for n in range(nTrain_all)
if cTrain_all[n]==c0 or cTrain_all[n]==c1]
Y_tr = np.array([int(c==c1) for c in C_tr])
n_tr = len(X_tr)
# Take test set
X_tst = np.array([[xTest_all[n][k] for k in ind] for n in range(nTest_all)
                  if cTest_all[n]==c0 or cTest_all[n]==c1])
C_tst = [cTest_all[n] for n in range(nTest_all)
if cTest_all[n]==c0 or cTest_all[n]==c1]
Y_tst = np.array([int(c==c1) for c in C_tst])
n_tst = len(X_tst)
def normalize(X, mx=None, sx=None):
# Compute means and standard deviations
if mx is None:
mx = np.mean(X, axis=0)
if sx is None:
sx = np.std(X, axis=0)
# Normalize
X0 = (X-mx)/sx
return X0, mx, sx
# Normalize data
Xn_tr, mx, sx = normalize(X_tr)
Xn_tst, mx, sx = normalize(X_tst, mx, sx)
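# Sanity check (a sketch): after normalization the training attributes
# should have (approximately) zero mean and unit standard deviation.
print 'Means: ' + str(np.mean(Xn_tr, axis=0))
print 'Stds:  ' + str(np.std(Xn_tr, axis=0))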
# Separate components of x into different arrays (just for the plots)
x0c0 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==0]
x1c0 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==0]
x0c1 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==1]
x1c1 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==1]
# Scatterplot.
labels = {'Iris-setosa': 'Setosa',
'Iris-versicolor': 'Versicolor',
'Iris-virginica': 'Virginica'}
plt.plot(x0c0, x1c0,'r.', label=labels[c0])
plt.plot(x0c1, x1c1,'g+', label=labels[c1])
plt.xlabel('$x_' + str(ind[0]) + '$')
plt.ylabel('$x_' + str(ind[1]) + '$')
plt.legend(loc='best')
plt.axis('equal')
def logregFit(Z_tr, Y_tr, rho, n_it):
# Data dimension
n_dim = Z_tr.shape[1]
# Initialize variables
nll_tr = np.zeros(n_it)
nll_tr2 = np.zeros(n_it)
pe_tr = np.zeros(n_it)
w = np.random.randn(n_dim,1)
# Running the gradient descent algorithm
for n in range(n_it):
# Compute posterior probabilities for weight w
p1_tr = logistic(np.dot(Z_tr, w))
# Compute negative log-likelihood
# (note that this is not required for the weight update, only for nll tracking)
Y_tr2 = 2*Y_tr - 1
nll_tr[n] = np.sum(np.log(1 + np.exp(-np.dot(Y_tr2*Z_tr, w))))
# Update weights
w += rho*np.dot(Z_tr.T, Y_tr - p1_tr)
return w, nll_tr
def logregPredict(Z, w):
# Compute posterior probability of class 1 for weights w.
p = logistic(np.dot(Z, w))
# Class
D = [int(round(pn)) for pn in p]
return p, D
# Parameters of the algorithms
rho = float(1)/50 # Learning step
n_it = 200 # Number of iterations
# Compute Z's
Z_tr = np.c_[np.ones(n_tr), Xn_tr]
Z_tst = np.c_[np.ones(n_tst), Xn_tst]
n_dim = Z_tr.shape[1]
# Convert target arrays to column vectors
Y_tr2 = Y_tr[np.newaxis].T
Y_tst2 = Y_tst[np.newaxis].T
# Running the gradient descent algorithm
w, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it)
# Classify training and test data
p_tr, D_tr = logregPredict(Z_tr, w)
p_tst, D_tst = logregPredict(Z_tst, w)
# Compute error rates
E_tr = D_tr!=Y_tr
E_tst = D_tst!=Y_tst
# Error rates
pe_tr = float(sum(E_tr)) / n_tr
pe_tst = float(sum(E_tst)) / n_tst
# NLL plot.
plt.plot(range(n_it), nll_tr,'b.:', label='Train')
plt.xlabel('Iteration')
plt.ylabel('Negative Log-Likelihood')
plt.legend()
print "The optimal weights are:"
print w
print "The final error rates are:"
print "- Training: " + str(pe_tr)
print "- Test: " + str(pe_tst)
print "The NLL after training is " + str(nll_tr[len(nll_tr)-1])
# Create a rectangular grid.
x_min, x_max = Xn_tr[:, 0].min(), Xn_tr[:, 0].max()
y_min, y_max = Xn_tr[:, 1].min(), Xn_tr[:, 1].max()
dx = x_max - x_min
dy = y_max - y_min
h = dy /400
xx, yy = np.meshgrid(np.arange(x_min - 0.1 * dx, x_max + 0.1 * dx, h),
np.arange(y_min - 0.1 * dx, y_max + 0.1 * dy, h))
X_grid = np.array([xx.ravel(), yy.ravel()]).T
# Compute Z's
Z_grid = np.c_[np.ones(X_grid.shape[0]), X_grid]
# Compute the classifier output for all samples in the grid.
pp, dd = logregPredict(Z_grid, w)
# Put the result into a color plot
plt.plot(x0c0, x1c0,'r.', label=labels[c0])
plt.plot(x0c1, x1c1,'g+', label=labels[c1])
plt.xlabel('$x_' + str(ind[0]) + '$')
plt.ylabel('$x_' + str(ind[1]) + '$')
plt.legend(loc='best')
plt.axis('equal')
pp = pp.reshape(xx.shape)
plt.contourf(xx, yy, pp, cmap=plt.cm.copper)
# Parameters of the algorithms
rho = float(1)/50 # Learning step
n_it = 500 # Number of iterations
g = 5 # Degree of polynomial
# Compute Z_tr
poly = PolynomialFeatures(degree=g)
Z_tr = poly.fit_transform(Xn_tr)
# Normalize columns (this is useful to make algorithms more stable).)
Zn, mz, sz = normalize(Z_tr[:,1:])
Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1)
# Compute Z_tst
Z_tst = poly.fit_transform(Xn_tst)
Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz)
Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1)
# Convert target arrays to column vectors
Y_tr2 = Y_tr[np.newaxis].T
Y_tst2 = Y_tst[np.newaxis].T
# Running the gradient descent algorithm
w, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it)
# Classify training and test data
p_tr, D_tr = logregPredict(Z_tr, w)
p_tst, D_tst = logregPredict(Z_tst, w)
# Compute error rates
E_tr = D_tr!=Y_tr
E_tst = D_tst!=Y_tst
# Error rates
pe_tr = float(sum(E_tr)) / n_tr
pe_tst = float(sum(E_tst)) / n_tst
# NLL plot.
plt.plot(range(n_it), nll_tr,'b.:', label='Train')
plt.xlabel('Iteration')
plt.ylabel('Negative Log-Likelihood')
plt.legend()
print "The optimal weights are:"
print w
print "The final error rates are:"
print "- Training: " + str(pe_tr)
print "- Test: " + str(pe_tst)
print "The NLL after training is " + str(nll_tr[len(nll_tr)-1])
# Compute Z_grid
Z_grid = poly.fit_transform(X_grid)
n_grid = Z_grid.shape[0]
Zn, mz, sz = normalize(Z_grid[:,1:], mz, sz)
Z_grid = np.concatenate((np.ones((n_grid,1)), Zn), axis=1)
# Compute the classifier output for all samples in the grid.
pp, dd = logregPredict(Z_grid, w)
pp = pp.reshape(xx.shape)
# Paint output maps
pylab.rcParams['figure.figsize'] = 8, 4 # Set figure size
for i in [1, 2]:
ax = plt.subplot(1,2,i)
ax.plot(x0c0, x1c0,'r.', label=labels[c0])
ax.plot(x0c1, x1c1,'g+', label=labels[c1])
ax.set_xlabel('$x_' + str(ind[0]) + '$')
ax.set_ylabel('$x_' + str(ind[1]) + '$')
ax.axis('equal')
if i==1:
ax.contourf(xx, yy, pp, cmap=plt.cm.copper)
else:
ax.legend(loc='best')
ax.contourf(xx, yy, np.round(pp), cmap=plt.cm.copper)
def logregFit2(Z_tr, Y_tr, rho, n_it, C=1e4):
    # Regularization term of the MAP estimate (Gaussian prior): r = 2/C.
    # The weight update below is a damped Newton (IRLS) step on the
    # penalized negative log-likelihood.
r = 2.0/C
n_dim = Z_tr.shape[1]
# Initialize variables
nll_tr = np.zeros(n_it)
pe_tr = np.zeros(n_it)
w = np.random.randn(n_dim,1)
# Running the gradient descent algorithm
for n in range(n_it):
p_tr = logistic(np.dot(Z_tr, w))
sk = np.multiply(p_tr, 1-p_tr)
S = np.diag(np.ravel(sk.T))
# Compute negative log-likelihood
nll_tr[n] = - np.dot(Y_tr.T, np.log(p_tr)) - np.dot((1-Y_tr).T, np.log(1-p_tr))
# Update weights
invH = np.linalg.inv(r*np.identity(n_dim) + np.dot(Z_tr.T, np.dot(S, Z_tr)))
w += rho*np.dot(invH, np.dot(Z_tr.T, Y_tr - p_tr))
return w, nll_tr
# Parameters of the algorithms
rho = float(1)/50 # Learning step
n_it = 500 # Number of iterations
C = 1000
g = 4
# Compute Z_tr
poly = PolynomialFeatures(degree=g)
Z_tr = poly.fit_transform(X_tr)
# Normalize columns (this is useful to make algorithms more stable).)
Zn, mz, sz = normalize(Z_tr[:,1:])
Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1)
# Compute Z_tst
Z_tst = poly.fit_transform(X_tst)
Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz)
Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1)
# Convert target arrays to column vectors
Y_tr2 = Y_tr[np.newaxis].T
Y_tst2 = Y_tst[np.newaxis].T
# Running the gradient descent algorithm
w, nll_tr = logregFit2(Z_tr, Y_tr2, rho, n_it, C)
# Classify training and test data
p_tr, D_tr = logregPredict(Z_tr, w)
p_tst, D_tst = logregPredict(Z_tst, w)
# Compute error rates
E_tr = D_tr!=Y_tr
E_tst = D_tst!=Y_tst
# Error rates
pe_tr = float(sum(E_tr)) / n_tr
pe_tst = float(sum(E_tst)) / n_tst
# NLL plot.
plt.plot(range(n_it), nll_tr,'b.:', label='Train')
plt.xlabel('Iteration')
plt.ylabel('Negative Log-Likelihood')
plt.legend()
print "The final error rates are:"
print "- Training: " + str(pe_tr)
print "- Test: " + str(pe_tst)
print "The NLL after training is " + str(nll_tr[len(nll_tr)-1])
# Create a logistic regression object.
LogReg = linear_model.LogisticRegression(C=1.0)
# Compute Z_tr
poly = PolynomialFeatures(degree=g)
Z_tr = poly.fit_transform(Xn_tr)
# Normalize columns (this is useful to make algorithms more stable).)
Zn, mz, sz = normalize(Z_tr[:,1:])
Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1)
# Compute Z_tst
Z_tst = poly.fit_transform(Xn_tst)
Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz)
Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1)
# Fit model to data.
LogReg.fit(Z_tr, Y_tr)
# Classify training and test data
D_tr = LogReg.predict(Z_tr)
D_tst = LogReg.predict(Z_tst)
# Compute error rates
E_tr = D_tr!=Y_tr
E_tst = D_tst!=Y_tst
# Error rates
pe_tr = float(sum(E_tr)) / n_tr
pe_tst = float(sum(E_tst)) / n_tst
print "The final error rates are:"
print "- Training: " + str(pe_tr)
print "- Test: " + str(pe_tst)
# Compute Z_grid
Z_grid = poly.fit_transform(X_grid)
n_grid = Z_grid.shape[0]
Zn, mz, sz = normalize(Z_grid[:,1:], mz, sz)
Z_grid = np.concatenate((np.ones((n_grid,1)), Zn), axis=1)
# Compute the classifier output for all samples in the grid.
dd = LogReg.predict(Z_grid)
pp = LogReg.predict_proba(Z_grid)[:,1]
pp = pp.reshape(xx.shape)
# Paint output maps
pylab.rcParams['figure.figsize'] = 8, 4 # Set figure size
for i in [1, 2]:
ax = plt.subplot(1,2,i)
ax.plot(x0c0, x1c0,'r.', label=labels[c0])
ax.plot(x0c1, x1c1,'g+', label=labels[c1])
ax.set_xlabel('$x_' + str(ind[0]) + '$')
ax.set_ylabel('$x_' + str(ind[1]) + '$')
ax.axis('equal')
if i==1:
ax.contourf(xx, yy, pp, cmap=plt.cm.copper)
else:
ax.legend(loc='best')
ax.contourf(xx, yy, np.round(pp), cmap=plt.cm.copper)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Federated Learning for text generation
Step2: Load a pre-trained model
Step3: Load the pre-trained model and generate some text
Step4: Load and preprocess the federated Shakespeare data
Step5: The datasets returned by shakespeare.load_data() consist of string Tensors, one for each line spoken by a particular character in a Shakespeare play. The client keys consist of the name of the play joined with the name of the character, so for example MUCH_ADO_ABOUT_NOTHING_OTHELLO corresponds to the lines of the character Othello in the play "Much Ado About Nothing". Note that in a real federated learning scenario clients are never identified or tracked by ids, but for simulation it is useful to work with keyed datasets.
Step6: Now we use tf.data.Dataset transformations to prepare this data for training the char RNN loaded above.
Step7: Note that in the formation of the original sequences and in the formation of batches above, we use drop_remainder=True for simplicity. This means that any characters (clients) that don't have at least (SEQ_LENGTH + 1) * BATCH_SIZE characters of text will have empty datasets. A typical approach to address this would be to pad the batches with a special token, and then mask the loss so that the padding tokens are not taken into account.
Step8: Compile the model and test it on the preprocessed data
Step9: We can now compile the model and evaluate it on our example_dataset.
Step10: Fine-tune the model with Federated Learning
Step11: We are now ready to construct a Federated Averaging iterative process, which we will use to improve the model (for details on the Federated Averaging algorithm, see the paper "Communication-Efficient Learning of Deep Networks from Decentralized Data").
Step12: Here is the simplest possible loop, where we run federated averaging for one round on a single client with a single batch.
Step13: Now let's write a slightly more interesting training and evaluation loop.
Step14: The initial state of the model produced by fed_avg.initialize() is based on the random initializers for the Keras model, not the weights that were loaded, since clone_model() does not clone the weights. To start training from the pre-trained model, we set the model weights in the server state directly from the loaded model.
Step15: With the default changes we haven't done enough training to make a big difference, but if you train longer on more Shakespeare data, you should see a difference in the style of the text generated with the updated model.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow-federated
!pip install --quiet --upgrade nest-asyncio
import nest_asyncio
nest_asyncio.apply()
import collections
import functools
import os
import time
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
np.random.seed(0)
# Test that TFF is working:
tff.federated_computation(lambda: 'Hello, World!')()
# A fixed vocabulary of ASCII chars that occur in the works of Shakespeare and Dickens:
vocab = list('dhlptx@DHLPTX $(,048cgkoswCGKOSW[_#\'/37;?bfjnrvzBFJNRVZ"&*.26:\naeimquyAEIMQUY]!%)-159\r')
# Creating a mapping from unique characters to indices
char2idx = {u:i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
def load_model(batch_size):
urls = {
1: 'https://storage.googleapis.com/tff-models-public/dickens_rnn.batch1.kerasmodel',
8: 'https://storage.googleapis.com/tff-models-public/dickens_rnn.batch8.kerasmodel'}
assert batch_size in urls, 'batch_size must be in ' + str(urls.keys())
url = urls[batch_size]
local_file = tf.keras.utils.get_file(os.path.basename(url), origin=url)
return tf.keras.models.load_model(local_file, compile=False)
def generate_text(model, start_string):
# From https://www.tensorflow.org/tutorials/sequences/text_generation
num_generate = 200
input_eval = [char2idx[s] for s in start_string]
input_eval = tf.expand_dims(input_eval, 0)
text_generated = []
temperature = 1.0
model.reset_states()
for i in range(num_generate):
predictions = model(input_eval)
predictions = tf.squeeze(predictions, 0)
predictions = predictions / temperature
predicted_id = tf.random.categorical(
predictions, num_samples=1)[-1, 0].numpy()
input_eval = tf.expand_dims([predicted_id], 0)
text_generated.append(idx2char[predicted_id])
return (start_string + ''.join(text_generated))
# Text generation requires a batch_size=1 model.
keras_model_batch1 = load_model(batch_size=1)
print(generate_text(keras_model_batch1, 'What of TensorFlow Federated, you ask? '))
train_data, test_data = tff.simulation.datasets.shakespeare.load_data()
# Here the play is "The Tragedy of King Lear" and the character is "King".
raw_example_dataset = train_data.create_tf_dataset_for_client(
'THE_TRAGEDY_OF_KING_LEAR_KING')
# To allow for future extensions, each entry x
# is an OrderedDict with a single key 'snippets' which contains the text.
for x in raw_example_dataset.take(2):
print(x['snippets'])
# Input pre-processing parameters
SEQ_LENGTH = 100
BATCH_SIZE = 8
BUFFER_SIZE = 100 # For dataset shuffling
# Construct a lookup table to map string chars to indexes,
# using the vocab loaded above:
table = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(
keys=vocab, values=tf.constant(list(range(len(vocab))),
dtype=tf.int64)),
default_value=0)
def to_ids(x):
s = tf.reshape(x['snippets'], shape=[1])
chars = tf.strings.bytes_split(s).values
ids = table.lookup(chars)
return ids
def split_input_target(chunk):
input_text = tf.map_fn(lambda x: x[:-1], chunk)
target_text = tf.map_fn(lambda x: x[1:], chunk)
return (input_text, target_text)
def preprocess(dataset):
return (
# Map ASCII chars to int64 indexes using the vocab
dataset.map(to_ids)
# Split into individual chars
.unbatch()
# Form example sequences of SEQ_LENGTH +1
.batch(SEQ_LENGTH + 1, drop_remainder=True)
# Shuffle and form minibatches
.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
# And finally split into (input, target) tuples,
# each of length SEQ_LENGTH.
.map(split_input_target))
example_dataset = preprocess(raw_example_dataset)
print(example_dataset.element_spec)
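# Sanity check (a sketch): decode the first input sequence of a batch back to
# text with idx2char, to confirm that the preprocessing round-trips correctly.
for batch_input, batch_target in example_dataset.take(1):
  print(''.join(idx2char[batch_input[0].numpy()]))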
class FlattenedCategoricalAccuracy(tf.keras.metrics.SparseCategoricalAccuracy):
def __init__(self, name='accuracy', dtype=tf.float32):
super().__init__(name, dtype=dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.reshape(y_true, [-1, 1])
y_pred = tf.reshape(y_pred, [-1, len(vocab), 1])
return super().update_state(y_true, y_pred, sample_weight)
BATCH_SIZE = 8 # The training and eval batch size for the rest of this tutorial.
keras_model = load_model(batch_size=BATCH_SIZE)
keras_model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[FlattenedCategoricalAccuracy()])
# Confirm that loss is much lower on Shakespeare than on random data
loss, accuracy = keras_model.evaluate(example_dataset.take(5), verbose=0)
print(
'Evaluating on an example Shakespeare character: {a:3f}'.format(a=accuracy))
# As a sanity check, we can construct some completely random data, where we expect
# the accuracy to be essentially random:
random_guessed_accuracy = 1.0 / len(vocab)
print('Expected accuracy for random guessing: {a:.3f}'.format(
a=random_guessed_accuracy))
random_indexes = np.random.randint(
low=0, high=len(vocab), size=1 * BATCH_SIZE * (SEQ_LENGTH + 1))
data = collections.OrderedDict(
snippets=tf.constant(
''.join(np.array(vocab)[random_indexes]), shape=[1, 1]))
random_dataset = preprocess(tf.data.Dataset.from_tensor_slices(data))
loss, accuracy = keras_model.evaluate(random_dataset, steps=10, verbose=0)
print('Evaluating on completely random data: {a:.3f}'.format(a=accuracy))
# Clone the keras_model inside `create_tff_model()`, which TFF will
# call to produce a new copy of the model inside the graph that it will
# serialize. Note: we want to construct all the necessary objects we'll need
# _inside_ this method.
def create_tff_model():
# TFF uses an `input_spec` so it knows the types and shapes
# that your model expects.
input_spec = example_dataset.element_spec
keras_model_clone = tf.keras.models.clone_model(keras_model)
return tff.learning.from_keras_model(
keras_model_clone,
input_spec=input_spec,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[FlattenedCategoricalAccuracy()])
# This command builds all the TensorFlow graphs and serializes them:
fed_avg = tff.learning.build_federated_averaging_process(
model_fn=create_tff_model,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(lr=0.5))
state = fed_avg.initialize()
state, metrics = fed_avg.next(state, [example_dataset.take(5)])
train_metrics = metrics['train']
print('loss={l:.3f}, accuracy={a:.3f}'.format(
l=train_metrics['loss'], a=train_metrics['accuracy']))
def data(client, source=train_data):
return preprocess(source.create_tf_dataset_for_client(client)).take(5)
clients = [
'ALL_S_WELL_THAT_ENDS_WELL_CELIA', 'MUCH_ADO_ABOUT_NOTHING_OTHELLO',
]
train_datasets = [data(client) for client in clients]
# We concatenate the test datasets for evaluation with Keras by creating a
# Dataset of Datasets, and then identity flat mapping across all the examples.
test_dataset = tf.data.Dataset.from_tensor_slices(
[data(client, test_data) for client in clients]).flat_map(lambda x: x)
NUM_ROUNDS = 5
# The state of the FL server, containing the model and optimization state.
state = fed_avg.initialize()
# Load our pre-trained Keras model weights into the global model state.
state = tff.learning.state_with_new_model_weights(
state,
trainable_weights=[v.numpy() for v in keras_model.trainable_weights],
non_trainable_weights=[
v.numpy() for v in keras_model.non_trainable_weights
])
def keras_evaluate(state, round_num):
# Take our global model weights and push them back into a Keras model to
# use its standard `.evaluate()` method.
keras_model = load_model(batch_size=BATCH_SIZE)
keras_model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[FlattenedCategoricalAccuracy()])
state.model.assign_weights_to(keras_model)
loss, accuracy = keras_model.evaluate(example_dataset, steps=2, verbose=0)
print('\tEval: loss={l:.3f}, accuracy={a:.3f}'.format(l=loss, a=accuracy))
for round_num in range(NUM_ROUNDS):
print('Round {r}'.format(r=round_num))
keras_evaluate(state, round_num)
state, metrics = fed_avg.next(state, train_datasets)
train_metrics = metrics['train']
print('\tTrain: loss={l:.3f}, accuracy={a:.3f}'.format(
l=train_metrics['loss'], a=train_metrics['accuracy']))
print('Final evaluation')
keras_evaluate(state, NUM_ROUNDS + 1)
# Set our newly trained weights back in the originally created model.
keras_model_batch1.set_weights([v.numpy() for v in keras_model.weights])
# Text generation requires batch_size=1
print(generate_text(keras_model_batch1, 'What of TensorFlow Federated, you ask? '))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this guide, we will show how to tailor the search space without changing the hypermodel code directly.
Step2: We will reuse this search space in the rest of the tutorial by overriding the hyperparameters.
Step3: If you summarize the search space, you will see only one hyperparameter.
Step4: Fix a few and tune the rest
Step5: If you summarize the search space, you will see the learning_rate is marked as fixed.
Step6: Overriding compilation arguments
Step7: If you get the best model, you can see the loss function has changed to MSE.
Step8: Tailor the search space of pre-build HyperModels
|
<ASSISTANT_TASK:>
Python Code:
!pip install keras-tuner -q
from tensorflow import keras
from tensorflow.keras import layers
import keras_tuner
import numpy as np
def build_model(hp):
model = keras.Sequential()
model.add(layers.Flatten())
model.add(
layers.Dense(
units=hp.Int("units", min_value=32, max_value=128, step=32, default=64)
)
)
if hp.Boolean("dropout"):
model.add(layers.Dropout(rate=0.25))
    model.add(layers.Dense(units=10, activation="softmax"))
model.compile(
optimizer=keras.optimizers.Adam(
learning_rate=hp.Choice("learning_rate", values=[1e-2, 1e-3, 1e-4])
),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
return model
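# Smoke test (an addition, not part of the original guide): calling the
# builder with a fresh HyperParameters() object uses the declared defaults,
# which confirms the model compiles before any search starts.
build_model(keras_tuner.HyperParameters())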
hp = keras_tuner.HyperParameters()
# This will override the `learning_rate` parameter with your
# own selection of choices
hp.Float("learning_rate", min_value=1e-4, max_value=1e-2, sampling="log")
tuner = keras_tuner.RandomSearch(
hypermodel=build_model,
hyperparameters=hp,
# Prevents unlisted parameters from being tuned
tune_new_entries=False,
objective="val_accuracy",
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="search_a_few",
)
# Generate random data
x_train = np.random.rand(100, 28, 28, 1)
y_train = np.random.randint(0, 10, (100, 1))
x_val = np.random.rand(20, 28, 28, 1)
y_val = np.random.randint(0, 10, (20, 1))
# Run the search
tuner.search(x_train, y_train, epochs=1, validation_data=(x_val, y_val))
tuner.search_space_summary()
hp = keras_tuner.HyperParameters()
hp.Fixed("learning_rate", value=1e-4)
tuner = keras_tuner.RandomSearch(
build_model,
hyperparameters=hp,
tune_new_entries=True,
objective="val_accuracy",
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="fix_a_few",
)
tuner.search(x_train, y_train, epochs=1, validation_data=(x_val, y_val))
tuner.search_space_summary()
tuner = keras_tuner.RandomSearch(
build_model,
optimizer=keras.optimizers.Adam(1e-3),
loss="mse",
metrics=["sparse_categorical_crossentropy",],
objective="val_loss",
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="override_compile",
)
tuner.search(x_train, y_train, epochs=1, validation_data=(x_val, y_val))
tuner.get_best_models()[0].loss
hypermodel = keras_tuner.applications.HyperXception(input_shape=(28, 28, 1), classes=10)
hp = keras_tuner.HyperParameters()
# This will override the `learning_rate` parameter with your
# own selection of choices
hp.Choice("learning_rate", values=[1e-2, 1e-3, 1e-4])
tuner = keras_tuner.RandomSearch(
hypermodel,
hyperparameters=hp,
# Prevents unlisted parameters from being tuned
tune_new_entries=False,
# Override the loss.
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
objective="val_accuracy",
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="helloworld",
)
# Run the search
tuner.search(x_train, y_train, epochs=1, validation_data=(x_val, y_val))
tuner.search_space_summary()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Impute PE
Step2: Impute PE through random forest regression
Step3: This approach gives us an expected RMSE of about 0.575 - now let's impute the missing data using this approach!
Step4: Now we have a full data set with no missing values!
Step5: I proceed to run Paolo Bestagini's routines to include a small window of values to account for the spatial component in the log analysis, as well as gradient information with respect to depth.
Step6: Now I'll apply the Paolo routines to the data - augmenting the features!
Step7: Tuning and Cross-Validation
Step8: Apply tuning to search for optimal hyperparameters.
Step9: Through tuning we observe optimal hyperparameters to be 250 (number of estimators), 2 (minimum number of samples per leaf), 75 (maximum number of features to consider when looking for the optimal split), and 5 (minimum number of samples required to split a node). These values yielded an average F1-score of 0.584 through cross-validation.
Step10: Now I will apply Paolo Bestagini's routines.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn import preprocessing
filename = '../facies_vectors.csv'
train = pd.read_csv(filename)
# encode well name and formation features
le = preprocessing.LabelEncoder()
train["Well Name"] = le.fit_transform(train["Well Name"])
train["Formation"] = le.fit_transform(train["Formation"])
data_loaded = train.copy()
# cleanup memory
del train
data_loaded
from sklearn import preprocessing
data = data_loaded.copy()
impPE_features = ['Facies', 'Formation', 'Well Name', 'GR', 'ILD_log10', 'DeltaPHI', 'PHIND', 'NM_M', 'RELPOS']
rmse = []
for w in data["Well Name"].unique():
wTrain = data[(data["PE"].notnull()) & (data["Well Name"] != w)]
wTest = data[(data["PE"].notnull()) & (data["Well Name"] == w)]
if wTest.shape[0] > 0:
yTest = wTest["PE"].values
meanPE = wTrain["PE"].mean()
wTest["predictedPE"] = meanPE
rmse.append((((yTest - wTest["predictedPE"])**2).mean())**0.5)
print(rmse)
print("Average RMSE:" + str(sum(rmse)/len(rmse)))
# cleanup memory
del data
from sklearn.ensemble import RandomForestRegressor
data = data_loaded.copy()
impPE_features = ['Facies', 'Formation', 'Well Name', 'GR', 'ILD_log10', 'DeltaPHI', 'PHIND', 'NM_M', 'RELPOS']
rf = RandomForestRegressor(max_features='sqrt', n_estimators=100, random_state=1)
rmse = []
for w in data["Well Name"].unique():
wTrain = data[(data["PE"].isnull() == False) & (data["Well Name"] != w)]
wTest = data[(data["PE"].isnull() == False) & (data["Well Name"] == w)]
if wTest.shape[0] > 0:
XTrain = wTrain[impPE_features].values
yTrain = wTrain["PE"].values
XTest = wTest[impPE_features].values
yTest = wTest["PE"].values
w_rf = rf.fit(XTrain, yTrain)
predictedPE = w_rf.predict(XTest)
rmse.append((((yTest - predictedPE)**2).mean())**0.5)
print(rmse)
print("Average RMSE:" + str(sum(rmse)/len(rmse)))
# cleanup memory
del data
data = data_loaded.copy()
rf_train = data[data['PE'].notnull()]
rf_test = data[data['PE'].isnull()]
xTrain = rf_train[impPE_features].values
yTrain = rf_train["PE"].values
xTest = rf_test[impPE_features].values
rf_fit = rf.fit(xTrain, yTrain)
predictedPE = rf_fit.predict(xTest)
data["PE"][data["PE"].isnull()] = predictedPE
data_imputed = data.copy()
# cleanup memory
del data
# output
data_imputed
facies_labels = ['SS','CSiS','FSiS','SiSh','MS','WS','D','PS','BS']
data = data_imputed.copy()
features = ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE"]
for f in features:
facies_mean = data[f].groupby(data["Facies"]).mean()
for i in range(0, len(facies_mean)):
data[f + "_" + facies_labels[i] + "_SqDev"] = (data[f] - facies_mean.values[i])**2
data_fe = data.copy()
del data
data_fe
# Feature windows concatenation function
def augment_features_window(X, N_neig):
# Parameters
N_row = X.shape[0]
N_feat = X.shape[1]
# Zero padding
X = np.vstack((np.zeros((N_neig, N_feat)), X, (np.zeros((N_neig, N_feat)))))
# Loop over windows
X_aug = np.zeros((N_row, N_feat*(2*N_neig+1)))
for r in np.arange(N_row)+N_neig:
this_row = []
for c in np.arange(-N_neig,N_neig+1):
this_row = np.hstack((this_row, X[r+c]))
X_aug[r-N_neig] = this_row
return X_aug
# Feature gradient computation function
def augment_features_gradient(X, depth):
# Compute features gradient
d_diff = np.diff(depth).reshape((-1, 1))
d_diff[d_diff==0] = 0.001
X_diff = np.diff(X, axis=0)
X_grad = X_diff / d_diff
# Compensate for last missing value
X_grad = np.concatenate((X_grad, np.zeros((1, X_grad.shape[1]))))
return X_grad
# Feature augmentation function
def augment_features(X, well, depth, N_neig=1):
# Augment features
X_aug = np.zeros((X.shape[0], X.shape[1]*(N_neig*2+2)))
for w in np.unique(well):
w_idx = np.where(well == w)[0]
X_aug_win = augment_features_window(X[w_idx, :], N_neig)
X_aug_grad = augment_features_gradient(X[w_idx, :], depth[w_idx])
X_aug[w_idx, :] = np.concatenate((X_aug_win, X_aug_grad), axis=1)
# Find padded rows
padded_rows = np.unique(np.where(X_aug[:, 0:7] == np.zeros((1, 7)))[0])
return X_aug, padded_rows
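# Toy sanity check (hypothetical data, added for illustration): with the
# default N_neig=1 each row gains 2*1+2 = 4 feature blocks, so a (6, 2)
# input becomes (6, 8) after augmentation.
_X_toy = np.random.randn(6, 2)
_well_toy = np.array([0, 0, 0, 1, 1, 1])
_depth_toy = np.arange(6, dtype=float)
_X_aug_toy, _ = augment_features(_X_toy, _well_toy, _depth_toy)
assert _X_aug_toy.shape == (6, 8)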
data = data_fe.copy()
remFeatures = ["Facies", "Well Name", "Depth"]
x = list(data)
features = [f for f in x if f not in remFeatures]
X = data[features].values
y = data["Facies"].values
# Store well labels and depths
well = data['Well Name']
depth = data['Depth'].values
X_aug, padded_rows = augment_features(X, well.values, depth)
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score
#from classification_utilities import display_cm, display_adj_cm
# 1) loops through wells - splitting data (current well held out as CV/test)
# 2) trains model (using all wells excluding current)
# 3) evaluates predictions against known values and adds f1-score to array
# 4) returns average f1-score (expected f1-score)
def cvTrain(X, y, well, params):
rf = RandomForestClassifier(max_features=params['M'], n_estimators=params['N'], criterion='entropy',
min_samples_split=params['S'], min_samples_leaf=params['L'], random_state=1)
f1 = []
for w in well.unique():
Xtrain_w = X[well.values != w]
ytrain_w = y[well.values != w]
Xtest_w = X[well.values == w]
ytest_w = y[well.values == w]
w_rf = rf.fit(Xtrain_w, ytrain_w)
predictedFacies = w_rf.predict(Xtest_w)
f1.append(f1_score(ytest_w, predictedFacies, average='micro'))
f1 = (sum(f1)/len(f1))
return f1
# parameters search grid (uncomment for full grid search - will take a long time)
N_grid = [250] #[50, 250, 500] # n_estimators
M_grid = [75] #[25, 50, 75] # max_features
S_grid = [5] #[5, 10] # min_samples_split
L_grid = [2] #[2, 3, 5] # min_samples_leaf
# build grid of hyperparameters
param_grid = []
for N in N_grid:
for M in M_grid:
for S in S_grid:
for L in L_grid:
param_grid.append({'N':N, 'M':M, 'S':S, 'L':L})
# loop through parameters and cross-validate models for each
for params in param_grid:
print(str(params) + ' Average F1-score: ' + str(cvTrain(X_aug, y, well, params)))
from sklearn import preprocessing
filename = '../validation_data_nofacies.csv'
test = pd.read_csv(filename)
# encode well name and formation features
le = preprocessing.LabelEncoder()
test["Well Name"] = le.fit_transform(test["Well Name"])
test["Formation"] = le.fit_transform(test["Formation"])
test_loaded = test.copy()
facies_labels = ['SS','CSiS','FSiS','SiSh','MS','WS','D','PS','BS']
train = data_imputed.copy()
features = ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE"]
for f in features:
facies_mean = train[f].groupby(train["Facies"]).mean()
for i in range(0, len(facies_mean)):
test[f + "_" + facies_labels[i] + "_SqDev"] = (test[f] - facies_mean.values[i])**2
test_fe = test.copy()
del test
test_fe
test = test_fe.copy()
remFeatures = ["Well Name", "Depth"]
x = list(test)
features = [f for f in x if f not in remFeatures]
Xtest = test[features].values
# Store well labels and depths
welltest = test['Well Name']
depthtest = test['Depth'].values
Xtest_aug, test_padded_rows = augment_features(Xtest, welltest.values, depthtest)
from sklearn.ensemble import RandomForestClassifier
test = test_loaded.copy()
rf = RandomForestClassifier(max_features=75, n_estimators=250, criterion='entropy',
min_samples_split=5, min_samples_leaf=2, random_state=1)
fit = rf.fit(X_aug, y)
predictedFacies = fit.predict(Xtest_aug)
test["Facies"] = predictedFacies
test.to_csv('jpoirier011_submission001.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Optimisation
Step2: We can still easily generate some data
Step3: And we can define a log likelihood, and use optimisation to try to recover these parameters
Step4: We can inspect the simulated output
Step5: It looks fine, but what happens if we run it again?
Step6: So now we have two very different solutions, but both give an excellent fit.
Step7: We now try out a bit of MCMC, to see what's going on in the parameter space
Step8: We can see from the histograms that the chains are moving over a wide area, and in the plots on the right we see the same.
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pints
import pints.plot
class BadLogisticModel(pints.ForwardModel):
    """Logistic model of population growth with unidentifiable parameters."""
def __init__(self):
super(BadLogisticModel, self).__init__()
# Initial population size
self._p0 = 2
# Fixed growth rate
self._r = 0.1
def n_parameters(self):
return 2
def simulate(self, parameters, times):
k1, k2 = parameters
times = np.asarray(times)
# Combine k1 and k2 into a single parameter, k
k = 41 + np.sqrt(k1**2 + k2**2)
return k / (1 + (k / self._p0 - 1) * np.exp(-self._r * times))
model = BadLogisticModel()
real_parameters = [3, 3]
times = np.linspace(0, 100, 100)
experiment = model.simulate(real_parameters, times)
sigma_noise = 2
noisy_experiment = experiment + np.random.normal(0, sigma_noise, size=experiment.shape)
plt.figure(figsize=(15, 4))
plt.xlabel('Time')
plt.ylabel('Population')
plt.plot(times, noisy_experiment, 'x')
plt.show()
problem = pints.SingleOutputProblem(model, times, noisy_experiment)
log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, sigma_noise)
opt = pints.OptimisationController(log_likelihood, [6, 2], method=pints.XNES)
opt.set_log_to_screen(False)
x1, f1 = opt.run()
print('Found solution with loglikelihood ' + str(f1))
print(x1)
plt.figure(figsize=(15, 4))
plt.xlabel('Time')
plt.ylabel('Population')
plt.plot(times, noisy_experiment, 'x', label='Data')
plt.plot(times, model.simulate(x1, times), label='Fit')
plt.legend()
plt.show()
opt = pints.OptimisationController(log_likelihood, [2, 6], method=pints.XNES)
opt.set_log_to_screen(False)
x2, f2 = opt.run()
print('Found solution with loglikelihood ' + str(f2))
print(x2)
plt.figure(figsize=(15, 4))
plt.xlabel('Time')
plt.ylabel('Population')
plt.plot(times, noisy_experiment, 'x')
plt.plot(times, model.simulate(x2, times))
plt.show()
opt = pints.OptimisationController(log_likelihood, [2, 6], method=pints.XNES)
opt.set_log_to_screen(False)
x3, f3 = opt.run()
print('Found solution with loglikelihood ' + str(f3))
print(x3)
plt.figure(figsize=(15, 4))
plt.xlabel('Time')
plt.ylabel('Population')
plt.plot(times, noisy_experiment, 'x')
plt.plot(times, model.simulate(x3, times))
plt.show()
mcmc = pints.MCMCController(log_likelihood, 3, [x1, x2, x3])
mcmc.set_max_iterations(6000)
mcmc.set_log_to_screen(False)
chains = mcmc.run()
# Show traces and histograms
pints.plot.trace(chains)
plt.show()
plt.figure(figsize=(7, 7))
plt.xlabel('Parameter 1 (k1)')
plt.ylabel('Parameter 2 (k2)')
plt.plot(chains[0, :, 0], chains[0, :, 1], 'x', alpha=0.2)
plt.plot(chains[1, :, 0], chains[1, :, 1], 'x', alpha=0.2)
plt.plot(chains[2, :, 0], chains[2, :, 1], 'x', alpha=0.2)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I would like to get the Best Seller list for the Month of October 2015. First I signed up to the New York Times API, and afterwards received a key in seconds.
Step2: Above I've used the urllib module and its urlopen method to request the data using the NY Times Books API. What is returned is a json file, that must be loaded into python using the json module and load method.
Step3: After viewing the information in the data variable, which gives access to the JSON content, I've decided to create a dictionary to save the information in as we loop through the JSON data.
Step4: After parsing through the data I chose to collect the book ranks, titles, authors, and both the 10-digit and 13-digit ISBNs, along with a description of the books.
Step5: What I am left with is a DataFrame built from the clean_data dictionary as the data, with manually titled columns, indexed by the rank of the books.
|
<ASSISTANT_TASK:>
Python Code:
import urllib2
import json
import pandas as pd
url = urllib2.urlopen('http://api.nytimes.com/svc/books/v3/lists/2015-10-01/hardcover-fiction.json?callback=books&sort-by=rank&sort-order=DESC&api-key=efb1f6ff386ce33c0b913d44bce40fd8%3A10%3A73015082')
data = json.load(url)
clean_data = {}
for item in data['results']['books']:
clean_data[item['rank']] = [item['title'], item['author'], item['primary_isbn10'], item['primary_isbn13'], item['description']]
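# A more defensive variant (sketch only, not run here): dict.get() returns
# None instead of raising KeyError if the API ever omits a field.
# for item in data['results']['books']:
#     clean_data[item['rank']] = [item.get('title'), item.get('author'),
#                                 item.get('primary_isbn10'),
#                                 item.get('primary_isbn13'),
#                                 item.get('description')]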
clean_data
best_seller = pd.DataFrame(clean_data.values(), columns = ['Title', 'Author', 'ISBN:10', 'ISBN:13', 'Description'],
index = clean_data.keys())
best_seller
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The SeqRecord Object
Step2: Additionally, you can also pass the id, name and description to the initialization function, but if not they will be set as strings indicating they are unknown, and can be modified subsequently
Step3: Including an identifier is very important if you want to output your SeqRecord to a file. You would normally include this when creating the object
Step4: As mentioned above, the SeqRecord has an dictionary attribute annotations. This is used
Step5: Working with per-letter-annotations is similar, letter_annotations is a
Step6: The dbxrefs and features attributes are just Python lists, and
Step7: Now, let's have a look at the key attributes of this SeqRecord
Step8: Next, the identifiers and description
Step9: As you can see above, the first word of the FASTA record's title line (after removing the '>' character) is used for both the id and the name, while the whole title line is kept as the description
Step10: In this case our example FASTA file was from the NCBI, and they have a fairly well defined set of conventions for formatting their FASTA lines. This means it would be possible to parse this information and extract the GI number and accession for example. However, FASTA files from other sources vary, so this isn't possible in general.
Step11: You should be able to spot some differences already! But taking the attributes individually,
Step12: The name comes from the LOCUS line, while the id includes the version suffix.
Step13: GenBank files don't have any per-letter annotations
Step14: Most of the annotations information gets recorded in the \verb|annotations| dictionary, for example
Step15: The dbxrefs list gets populated from any PROJECT or DBLINK lines
Step16: Finally, and perhaps most interestingly, all the entries in the features table (e.g. the genes or CDS features) get recorded as SeqFeature objects in the features list.
Step17: Feature, location and position objects
Step18: Note that the details of some of the fuzzy-locations changed in Biopython 1.59; in particular, BetweenPosition and WithinPosition now require you to state explicitly which integer position should be used for slicing.
Step19: We can access the fuzzy start and end positions using the start and end attributes of the location
Step20: If you don't want to deal with fuzzy positions and just want numbers,
Step21: For compatibility with older versions of Biopython you can ask for the
Step22: Notice that this just gives you back the position attributes of the fuzzy locations.
Step23: That is most of the nitty gritty about dealing with fuzzy positions in Biopython.
Step24: Note that gene and CDS features from GenBank or EMBL files defined with joins
Step25: You could take the parent sequence, slice it to extract 5
Step26: This is a simple example so this isn't too bad -- however once you have to deal with compound features (joins) this is rather messy. Instead, the SeqFeature object has an extract method to take care of all this (and since Biopython 1.78 can handle trans-splicing by supplying a dictionary of referenced sequences)
Step27: The length of a SeqFeature or location matches
Step28: For simple FeatureLocation objects the length is just the difference between the start and end positions. However, for a CompoundLocation the length is the sum of the constituent regions.
Step29: What happens when you try to compare these “identical” records?
Step30: Perhaps surprisingly older versions of Biopython would use Python’s default object comparison for theSeqRecord, meaning record1 == record2 would only return True if these variables pointed at the same object in memory. In this example, record1 == record2 would have returned False here!
Step31: False
Step32: Instead you should check the attributes you are interested in, for example the identifier and the sequence
Step33: Beware that comparing complex objects quickly gets complicated.
Step34: This format method takes a single mandatory argument, a lower case string which is supported by Bio.SeqIO as an output format (e.g. 'fasta').
Step35: For this example we're going to focus in on the pim gene, YP_pPCP05.
Step36: Let's slice this parent record from 4300 to 4800 (enough to include the pim
Step37: Our sub-record just has two features, the gene and CDS entries for YP_pPCP05
Step38: Notice that their locations have been adjusted to reflect the new parent sequence!
Step39: The same point could be made about the record id, name
Step40: This illustrates the problem nicely though, our new sub-record is
Step41: Adding SeqRecord objects
Step42: Let's suppose this was Roche 454 data, and that from other information
Step43: Now add the two parts together
Step44: Easy and intuitive? We hope so! You can make this shorter with just one line:
Step45: Now, for an example with features, we'll use a GenBank file.
Step46: You can shift the origin like this
Step47: Let's check what happened to the features and annotations after shifting the origin
Step48: Note that this isn't perfect, in that some annotation (like the database cross references and most of the annotations dictionary) is not preserved.
Step49: This is because the SeqRecord slicing step is cautious in what annotation it preserves, since blindly copying annotation that may no longer apply could be misleading.
Step50: Also note that in an example like this, you should probably change the record identifiers, since the NCBI references refer to the original unmodified sequence.
Step51: Here we take the reverse complement and specify a new identifier - but notice how most of the annotation is dropped (but not the features).
|
<ASSISTANT_TASK:>
Python Code:
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
simple_seq = Seq("GATC")
simple_seq_r = SeqRecord(simple_seq)
simple_seq_r.id
simple_seq_r.id = "AC12345"
simple_seq_r.description = "Made up sequence I wish I could write a paper about"
print(simple_seq_r.description)
simple_seq_r.seq
print(simple_seq_r.seq)
simple_seq = Seq("GATC")
simple_seq_r = SeqRecord(simple_seq, id="AC12345")
simple_seq_r.annotations["evidence"] = "None. I just made it up."
print(simple_seq_r.annotations)
print(simple_seq_r.annotations["evidence"])
simple_seq_r.letter_annotations["phred_quality"] = [40, 40, 38, 30]
print(simple_seq_r.letter_annotations)
print(simple_seq_r.letter_annotations["phred_quality"])
from Bio import SeqIO
record = SeqIO.read("data/NC_005816.fna", "fasta")
record
record.seq
record.id
record.name
record.description
record.dbxrefs
record.annotations
record.letter_annotations
record.features
record = SeqIO.read("data/NC_005816.gb", "genbank")
record
record.seq
record.id
record.name
record.description
record.letter_annotations
len(record.annotations)
record.annotations["source"]
record.dbxrefs
len(record.features)
from Bio import SeqFeature
start_pos = SeqFeature.AfterPosition(5)
end_pos = SeqFeature.BetweenPosition(9, left=8, right=9)
my_location = SeqFeature.FeatureLocation(start_pos, end_pos)
print(my_location)
my_location.start
print(my_location.start)
my_location.end
print(my_location.end)
int(my_location.start)
int(my_location.end)
my_location.nofuzzy_start
my_location.nofuzzy_end
exact_location = SeqFeature.FeatureLocation(5, 9)
print(exact_location)
exact_location.start
print(int(exact_location.start))
exact_location.nofuzzy_start
my_snp = 4350
record = SeqIO.read("data/NC_005816.gb", "genbank")
for feature in record.features:
if my_snp in feature:
print("%s %s" % (feature.type, feature.qualifiers.get('db_xref')))
from Bio.SeqFeature import SeqFeature, FeatureLocation
seq = Seq("ACCGAGACGGCAAAGGCTAGCATAGGTATGAGACTTCCTTCCTGCCAGTGCTGAGGAACTGGGAGCCTAC")
feature = SeqFeature(FeatureLocation(5, 18), type="gene", strand=-1)
feature_seq = seq[feature.location.start:feature.location.end].reverse_complement()
print(feature_seq)
feature_seq = feature.extract(seq)
print(feature_seq)
print(len(feature_seq))
print(len(feature))
print(len(feature.location))
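# Added sketch: for a CompoundLocation, e.g. a GenBank join, len() is the
# sum of the constituent parts.
from Bio.SeqFeature import CompoundLocation
join_location = CompoundLocation([FeatureLocation(0, 5), FeatureLocation(10, 14)])
print(len(join_location))  # 5 + 4 = 9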
from Bio.SeqRecord import SeqRecord
record1 = SeqRecord(Seq("ACGT"), id="test")
record2 = SeqRecord(Seq("ACGT"), id="test")
record1 == record2
record1 == record2 # on old versions of Biopython!
record1 == record2
record1.id == record2.id
record1.seq == record2.seq
record = SeqRecord(
Seq(
"MMYQQGCFAGGTVLRLAKDLAENNRGARVLVVCSEITAVTFRGPSETHLDSMVGQALFGD"
"GAGAVIVGSDPDLSVERPLYELVWTGATLLPDSEGAIDGHLREVGLTFHLLKDVPGLISK"
"NIEKSLKEAFTPLGISDWNSTFWIAHPGGPAILDQVEAKLGLKEEKMRATREVLSEYGNM"
"SSAC"
),
id="gi|14150838|gb|AAK54648.1|AF376133_1",
description="chalcone synthase [Cucumis sativus]",
)
print(record.format("fasta"))
record = SeqIO.read("data/NC_005816.gb", "genbank")
print(record)
len(record)
len(record.features)
print(record.features[20])
print(record.features[21])
sub_record = record[4300:4800]
sub_record
len(sub_record)
len(sub_record.features)
print(sub_record.features[0])
print(sub_record.features[1])
print(sub_record.annotations)
print(sub_record.dbxrefs)
print(sub_record.id)
print(sub_record.name)
print(sub_record.description)
sub_record.description = "Yersinia pestis biovar Microtus str. 91001 plasmid pPCP1, partial."
print(sub_record.format("fasta"))
record = next(SeqIO.parse("data/example.fastq", "fastq"))
print(len(record))
print(record.seq)
print(record.letter_annotations["phred_quality"])
left = record[:20]
print(left.seq)
print(left.letter_annotations["phred_quality"])
right = record[21:]
print(right.seq)
print(right.letter_annotations["phred_quality"])
edited = left + right
print(len(edited))
print(edited.seq)
print(edited.letter_annotations["phred_quality"])
edited = record[:20] + record[21:]
record = SeqIO.read("data/NC_005816.gb", "genbank")
print(record)
print(len(record))
print(len(record.features))
print(record.dbxrefs)
print(record.annotations.keys())
shifted = record[2000:] + record[:2000]
print(shifted)
print(len(shifted))
print(len(shifted.features))
print(shifted.dbxrefs)
print(shifted.annotations.keys())
shifted.dbxrefs = record.dbxrefs[:]
shifted.annotations = record.annotations.copy()
print(shifted.dbxrefs)
print(shifted.annotations.keys())
record = SeqIO.read("data/NC_005816.gb", "genbank")
print("%s %i %i %i %i" % (record.id, len(record), len(record.features), len(record.dbxrefs), len(record.annotations)))
rc = record.reverse_complement(id="TESTING")
print("%s %i %i %i %i" % (rc.id, len(rc), len(rc.features), len(rc.dbxrefs), len(rc.annotations)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Observe that the process model that we discovered, describes the same behavior as the model that we have shown above.
Step2: We'll analyze the process tree model from top to bottom.
Step3: Observe that both functions return three arguments, i.e., the Petri net, an initial and a final marking.
Step4: Note that, by definition, the alpha miner variants cannot discover invisible transitions (black boxes).
Step5: The pm4py.discover_dfg(log) function returns a triple.
Step6: Advanced Discovery
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import pm4py
df = pm4py.format_dataframe(pd.read_csv('data/running_example.csv', sep=';'), case_id='case_id', activity_key='activity',
timestamp_key='timestamp')
bpmn_model = pm4py.discover_bpmn_inductive(df)
pm4py.view_bpmn(bpmn_model)
process_tree = pm4py.discover_process_tree_inductive(df)
pm4py.view_process_tree(process_tree)
net1, im1, fm1 = pm4py.convert_to_petri_net(process_tree)
pm4py.view_petri_net(net1,im1,fm1)
net2, im2, fm2 = pm4py.discover_petri_net_inductive(df)
pm4py.view_petri_net(net2, im2, fm2)
net3, im3, fm3 = pm4py.discover_petri_net_alpha(df)
pm4py.view_petri_net(net3, im3, fm3)
net4, im4, fm4 = pm4py.discover_petri_net_alpha_plus(df)
pm4py.view_petri_net(net4, im4, fm4)
dfg, start_activities, end_activities = pm4py.discover_dfg(df)
pm4py.view_dfg(dfg, start_activities, end_activities)
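# Illustrative peek (an added snippet): the returned dfg is a dict mapping
# (activity_a, activity_b) pairs to their frequency.
for edge, count in sorted(dfg.items(), key=lambda kv: kv[1], reverse=True)[:5]:
    print(edge, count)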
map = pm4py.discover_heuristics_net(df)
pm4py.view_heuristics_net(map)
df_broken = pd.read_csv('data/running_example_broken.csv', sep=';')
bpmn_unfiltered = pm4py.discover_bpmn_inductive(df_broken)
pm4py.view_bpmn(bpmn_unfiltered)
bpmn_filtered = pm4py.discover_bpmn_inductive(df_broken, 0.8)
pm4py.view_bpmn(bpmn_filtered)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model
Step2: States and control inputs
Step3: The control input is the feed flow rate $u_{\text{inp}}$ of $S_{\text{s}}$
Step4: ODE and parameters
Step5: In the next step, the ODE for each state is set
Step6: Finally, the model setup is completed
Step7: Controller
Step8: We choose the prediction horizon n_horizon, set the robust horizon n_robust to 1. The time step t_step is set to one second and parameters of the applied discretization scheme orthogonal collocation are as seen below
Step9: Objective
Step10: Constraints
Step11: Uncertain values
Step12: This means with n_robust=1, that 9 different scenarios are considered.
Step13: Estimator
Step14: Simulator
Step15: For the simulation, we use the time step t_step as for the optimizer
Step16: Realizations of uncertain parameters
Step17: We define a function which is called in each simulation step, which gives the current realization of the uncertain parameters, with respect to defined inputs (in this case t_now)
Step18: By defining p_fun as above, the function will always return the same values.
Step19: Closed-loop simulation
Step20: Prepare visualization
Step21: We use the plotting capabilities, which are included in do-mpc.
Step22: A figure containing the 4 states and the control input are created
Step23: Run closed-loop
Step24: Results
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import sys
from casadi import *
# Add do_mpc to path. This is not necessary if it was installed via pip
sys.path.append('../../../')
# Import do_mpc package:
import do_mpc
model_type = 'continuous' # either 'discrete' or 'continuous'
model = do_mpc.model.Model(model_type)
# States struct (optimization variables):
X_s = model.set_variable('_x', 'X_s')
S_s = model.set_variable('_x', 'S_s')
P_s = model.set_variable('_x', 'P_s')
V_s = model.set_variable('_x', 'V_s')
# Input struct (optimization variables):
inp = model.set_variable('_u', 'inp')
# Certain parameters
mu_m = 0.02
K_m = 0.05
K_i = 5.0
v_par = 0.004
Y_p = 1.2
# Uncertain parameters:
Y_x = model.set_variable('_p', 'Y_x')
S_in = model.set_variable('_p', 'S_in')
# Auxiliary term
mu_S = mu_m*S_s/(K_m+S_s+(S_s**2/K_i))
# Differential equations
model.set_rhs('X_s', mu_S*X_s - inp/V_s*X_s)
model.set_rhs('S_s', -mu_S*X_s/Y_x - v_par*X_s/Y_p + inp/V_s*(S_in-S_s))
model.set_rhs('P_s', v_par*X_s - inp/V_s*P_s)
model.set_rhs('V_s', inp)
# Build the model
model.setup()
mpc = do_mpc.controller.MPC(model)
setup_mpc = {
'n_horizon': 20,
'n_robust': 1,
'open_loop': 0,
't_step': 1.0,
'state_discretization': 'collocation',
'collocation_type': 'radau',
'collocation_deg': 2,
'collocation_ni': 2,
'store_full_solution': True,
# Use MA27 linear solver in ipopt for faster calculations:
#'nlpsol_opts': {'ipopt.linear_solver': 'MA27'}
}
mpc.set_param(**setup_mpc)
mterm = -model.x['P_s'] # terminal cost
lterm = -model.x['P_s'] # stage cost
mpc.set_objective(mterm=mterm, lterm=lterm)
mpc.set_rterm(inp=1.0) # penalty on input changes
# lower bounds of the states
mpc.bounds['lower', '_x', 'X_s'] = 0.0
mpc.bounds['lower', '_x', 'S_s'] = -0.01
mpc.bounds['lower', '_x', 'P_s'] = 0.0
mpc.bounds['lower', '_x', 'V_s'] = 0.0
# upper bounds of the states
mpc.bounds['upper', '_x','X_s'] = 3.7
mpc.bounds['upper', '_x','P_s'] = 3.0
# upper and lower bounds of the control input
mpc.bounds['lower','_u','inp'] = 0.0
mpc.bounds['upper','_u','inp'] = 0.2
Y_x_values = np.array([0.5, 0.4, 0.3])
S_in_values = np.array([200.0, 220.0, 180.0])
mpc.set_uncertainty_values(Y_x = Y_x_values, S_in = S_in_values)
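# Added check: three candidate values for each of the two uncertain
# parameters give 3 * 3 = 9 scenarios per branching step (with n_robust=1).
print(len(Y_x_values) * len(S_in_values), 'scenarios per branching step')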
mpc.setup()
estimator = do_mpc.estimator.StateFeedback(model)
simulator = do_mpc.simulator.Simulator(model)
params_simulator = {
'integration_tool': 'cvodes',
'abstol': 1e-10,
'reltol': 1e-10,
't_step': 1.0
}
simulator.set_param(**params_simulator)
p_num = simulator.get_p_template()
p_num['Y_x'] = 0.4
p_num['S_in'] = 200.0
# function definition
def p_fun(t_now):
return p_num
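# A time-varying alternative (sketch only; these perturbation values are made up):
# def p_fun(t_now):
#     p_num['Y_x'] = 0.4 + 0.02*np.sin(t_now)
#     p_num['S_in'] = 200.0
#     return p_num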
# Set the user-defined function above as the function for the realization of the uncertain parameters
simulator.set_p_fun(p_fun)
simulator.setup()
# Initial state
X_s_0 = 1.0 # Concentration biomass [mol/l]
S_s_0 = 0.5 # Concentration substrate [mol/l]
P_s_0 = 0.0 # Concentration product [mol/l]
V_s_0 = 120.0 # Volume inside tank [m^3]
x0 = np.array([X_s_0, S_s_0, P_s_0, V_s_0])
# Set for controller, simulator and estimator
mpc.x0 = x0
simulator.x0 = x0
estimator.x0 = x0
mpc.set_initial_guess()
import matplotlib.pyplot as plt
plt.ion()
from matplotlib import rcParams
rcParams['text.usetex'] = True
rcParams['text.latex.preamble'] = [r'\usepackage{amsmath}',r'\usepackage{siunitx}']
rcParams['axes.grid'] = True
rcParams['lines.linewidth'] = 2.0
rcParams['axes.labelsize'] = 'xx-large'
rcParams['xtick.labelsize'] = 'xx-large'
rcParams['ytick.labelsize'] = 'xx-large'
mpc_graphics = do_mpc.graphics.Graphics(mpc.data)
sim_graphics = do_mpc.graphics.Graphics(simulator.data)
%%capture
fig, ax = plt.subplots(5, sharex=True, figsize=(16,9))
fig.align_ylabels()
for g in [sim_graphics,mpc_graphics]:
# Plot the state on axis 1 to 4:
g.add_line(var_type='_x', var_name='X_s', axis=ax[0], color='#1f77b4')
g.add_line(var_type='_x', var_name='S_s', axis=ax[1], color='#1f77b4')
g.add_line(var_type='_x', var_name='P_s', axis=ax[2], color='#1f77b4')
g.add_line(var_type='_x', var_name='V_s', axis=ax[3], color='#1f77b4')
# Plot the control input on axis 5:
g.add_line(var_type='_u', var_name='inp', axis=ax[4], color='#1f77b4')
ax[0].set_ylabel(r'$X_s~[\si[per-mode=fraction]{\mole\per\litre}]$')
ax[1].set_ylabel(r'$S_s~[\si[per-mode=fraction]{\mole\per\litre}]$')
ax[2].set_ylabel(r'$P_s~[\si[per-mode=fraction]{\mole\per\litre}]$')
ax[3].set_ylabel(r'$V_s~[\si[per-mode=fraction]{\mole\per\litre}]$')
ax[4].set_ylabel(r'$u_{\text{inp}}~[\si[per-mode=fraction]{\cubic\metre\per\minute}]$')
ax[4].set_xlabel(r'$t~[\si[per-mode=fraction]{\minute}]$')
%%capture
n_steps = 100
for k in range(n_steps):
u0 = mpc.make_step(x0)
y_next = simulator.make_step(u0)
x0 = estimator.make_step(y_next)
from matplotlib.animation import FuncAnimation, FFMpegWriter, ImageMagickWriter
# The function describing the gif:
def update(t_ind):
sim_graphics.plot_results(t_ind)
mpc_graphics.plot_predictions(t_ind)
mpc_graphics.reset_axes()
if False:
anim = FuncAnimation(fig, update, frames=n_steps, repeat=False)
gif_writer = ImageMagickWriter(fps=10)
anim.save('anim_batch_reactor_final.gif', writer=gif_writer)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the customer transaction data and preview the first and last rows
Step2: Compute overall summary statistics: transaction count, customer count, date range and total amount
Step3: Derive the average transactions per customer, amount per transaction and amount per customer
Step4: Filter the transactions with an Amount of 29.99 or more
Step5: Plot the distribution of transaction amounts
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
pd.set_option('max_columns', 50)
mpl.rcParams['lines.linewidth'] = 2
%matplotlib inline
data = pd.read_csv('/Users/crucker/Desktop/clv_transactions.csv')
data.head(6)
data.tail(6)
Transactions = data['CustomerID'].count()
Customers = data['CustomerID'].max()
MinTransactionDate = data['TransactionDate'].min()
MaxTransactionDate = data['TransactionDate'].max()
Amount = data['Amount'].sum()
summary = [Transactions, Customers, MinTransactionDate, MaxTransactionDate, round(Amount, 2)]
summary
summary_data = {'Transactions': [4181],
                'Customers': [1000],
                'MinTransactionDate': ['2010-01-04'],
                'MaxTransactionDate': ['2015-12-31'],
                'Amount': [33729.91]}
df = pd.DataFrame(summary_data, index = [''])
df
TransactionsPerCustomer = round(Transactions / Customers, 2)
TransactionsPerCustomer
AmountPerTransaction = round(Amount / Transactions, 2)
AmountPerTransaction
AmountPerCustomer = round(Amount / Customers, 2)
AmountPerCustomer
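# Cross-check via groupby (an added illustration; per_cust is a new helper
# name): per-customer counts and totals should average out to the ratios above.
per_cust = data.groupby('CustomerID')['Amount'].agg(['count', 'sum'])
per_cust.mean().round(2)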
ratio_data = {'TransactionsPerCustomer': [4.0],
              'AmountPerTransaction': [8.07],
              'AmountPerCustomer': [33.73]}
df = pd.DataFrame(ratio_data, index = [''])
df
more_summary = [TransactionsPerCustomer, AmountPerTransaction, AmountPerCustomer]
more_summary
data.loc[data['Amount'] >= 29.99]
import seaborn as sns
sns.set(color_codes=True)
plt.title('Distribution of Transaction Amounts', fontsize=14, fontweight="bold")
sns.distplot(data.Amount, color='#3498db')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can hide weights by passing force_weights=False (they still will be shown if it's impossible to highlight text)
Step2: Show explanations for the winning class for first 10 documents from test data
Step3: Now use a vectorizer that skips stopwords
Step4: Words such as "the", "in", "of" are not used as features and are not highlighted
|
<ASSISTANT_TASK:>
Python Code:
show_html_expl(explain_prediction(clf, test['data'][2], vec, target_names=train['target_names']),
force_weights=True)
show_html_expl(explain_prediction(clf, test['data'][4], vec, target_names=train['target_names']), force_weights=False)
import numpy as np
for doc in test['data'][:10]:
expl = explain_prediction(clf, doc, vec, target_names=train['target_names'], top_targets=1)
show_html_expl(expl, force_weights=False)
vec_stop = TfidfVectorizer(stop_words='english')
clf_stop = LogisticRegressionCV()
pipeline_stop = Pipeline([('vec', vec_stop), ('clf', clf_stop)])
pipeline_stop.fit(train['data'], train['target'])
show_html_expl(explain_prediction(clf_stop, test['data'][4], vec_stop, target_names=train['target_names']), force_weights=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1) A closer look
Step2: 2) Count the leaves
Step3: 3) Which move will the agent select?
Step4: 4) Examine the assumptions
Step5: 5) Submit to the competition
|
<ASSISTANT_TASK:>
Python Code:
from learntools.core import binder
binder.bind(globals())
from learntools.game_ai.ex3 import *
#_COMMENT_IF(PROD)_
q_1.hint()
# Check your answer (Run this code cell to receive credit!)
q_1.solution()
# Fill in the blank
num_leaves = ____
# Check your answer
q_2.check()
#%%RM_IF(PROD)%%
num_leaves = 7*7*7
q_2.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q_2.hint()
#_COMMENT_IF(PROD)_
q_2.solution()
# Fill in the blank
selected_move = ____
# Check your answer
q_3.check()
#%%RM_IF(PROD)%%
selected_move = 'three'
q_3.assert_check_failed()
#%%RM_IF(PROD)%%
selected_move = 1
q_3.assert_check_failed()
#%%RM_IF(PROD)%%
selected_move = 33
q_3.assert_check_failed()
#%%RM_IF(PROD)%%
selected_move = 3
q_3.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q_3.hint()
#_COMMENT_IF(PROD)_
q_3.solution()
#_COMMENT_IF(PROD)_
q_4.hint()
# Check your answer (Run this code cell to receive credit!)
q_4.solution()
def my_agent(obs, config):
# Your code here: Amend the agent!
import random
valid_moves = [col for col in range(config.columns) if obs.board[col] == 0]
return random.choice(valid_moves)
# Run this code cell to get credit for creating an agent
q_5.check()
import inspect
import os
def write_agent_to_file(function, file):
with open(file, "a" if os.path.exists(file) else "w") as f:
f.write(inspect.getsource(function))
print(function, "written to", file)
write_agent_to_file(my_agent, "submission.py")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Head() Tail()
Step2: Attributes and the underlying ndarray
Step3: To get only the data inside an object, ignoring the index and columns, use the values attribute
Step4: If a DataFrame or Panel holds a single dtype (say, all int64), modifying object.values modifies the original object in place. If the dtypes are mixed, you cannot assign to the array returned by the values attribute at all.
Step5: Filling missing values
Step6: Flexible comparison operations
Step7: These operations return an object of the same type as the input, with boolean values; the result can be used for indexing.
Step8: You can apply a reduction to an already-reduced result as well.
Step9: Use the empty attribute to test whether a pandas object is empty.
Step10: For a pandas object containing exactly one element, use bool() for boolean testing
Step11: Comparing objects for equality
Step12: Why does df + df == df*2 contain False? Because comparing NaN with NaN yields False!
Step13: Fortunately pandas provides the equals() method, which treats NaNs in the same locations as equal, solving the problem above.
Step14: Note:
Step15: Element-wise comparison between objects of different types
Step16: Element-wise comparison between objects of different types (e.g. a pandas structure and a NumPy array) also works, provided the two objects have the same shape.
Step17: Be aware that NumPy arrays of different shapes can be compared directly, thanks to broadcasting! Even when broadcasting fails there is no error; the comparison simply returns False.
Step18: combine_first()
Step19: Explanation:
Step20: Descriptive statistics methods
Step21: All of these methods take a skipna parameter, which controls whether missing values are excluded from the computation; skipna defaults to True.
Step22: These functions can take part in arithmetic and broadcasting operations.
Step23: Note that the cumsum() and cumprod() methods preserve the position of NA values.
Step24: Commonly used methods and their descriptions are listed below. Remember that each method has a level parameter for objects with a hierarchical index.
Step25: describe(), a data summary
Step26: By default describe() includes the 25%, 50% and 75% percentiles; you can specify others via the percentiles parameter.
Step27: If the data in a Series is non-numeric, describe() can still produce some statistics
Step28: If a DataFrame has both numeric and non-numeric columns, describe() only computes statistics for the numeric columns.
Step29: What if you do want statistics for the non-numeric columns? describe() provides an include parameter, taking values from {'object', 'number', 'all'}.
Step30: The index of the maximum/minimum value
Step31: If several values tie for the maximum or minimum, idxmax() and idxmin() return the index of the first occurrence
Step32: In fact, idxmin and idxmax correspond to NumPy's argmin and argmax.
Step33: The mode() method was introduced earlier; here are two examples anyway:
Step34: Discretizing values into intervals
Step35: The qcut() method computes sample quantiles; for example, we can discretize normally distributed data into quartiles:
Step36: Bin edges can also be defined using infinity
Step37: Function application
Step38: The line above is better written in the equivalent form below
Step39: Note that in all three functions f, g and h the DataFrame is the first argument. What if the DataFrame should be the second argument? Pass pipe a (callable, data_keyword) tuple and pipe will route the DataFrame to that keyword automatically.
Step40: Used flexibly, the apply() method can extract many characteristics of a data set. For instance, suppose we want the index of the maximum value in each column.
Step41: apply() of course accepts additional arguments, as in the example below:
Step42: Another useful feature is passing Series methods to a DataFrame, executing the built-in Series method on each column or row!
Step43: Applying a Python function element-wise
Step44: Series.map() can also mimic merge() and join()
Step45: reindex and changing labels
Step46: For a DataFrame, you can change the column names and index values at the same time.
Step47: If you only want to change the labels of the columns or the index, DataFrame also provides the reindex_axis() method, which takes labels and an axis.
Step48: The line above also shows that a Series index and a DataFrame index are instances of the same class.
Step49: Use align() to align two objects with each other
Step50: For a DataFrame, the join method applies to both the index and the columns by default.
Step51: align() also has an axis parameter to align along only one axis.
Step52: DataFrame.align() also accepts a Series; in that case axis refers to the DataFrame's index or columns.
Step53: Filling in values while reindexing
Step54: The method parameter requires the index to be monotonic: increasing or decreasing.
Step55: The difference between the two: reindex() raises an error if the index is not sorted, whereas fillna() and interpolate() do not check whether the index is ordered.
Step56: Removing certain index labels
Step57: Renaming index labels
Step58: The only requirement is that a passed function returns a value when called with an index label; if you pass a dict or Series, the index labels must be among its keys. This is easy to understand.
Step59: By default only a copy is modified; to modify the labels of the original object, pass inplace=True.
Step60: Iteration
Step61: pandas objects also have a dict-like iteritems() method to iterate over (key, value) pairs.
Step62: iterrows()
Step63: itertuples()
Step64: The .dt accessor
Step65: Changing the time format is also convenient with Series.dt.strftime()
Step66: String handling methods
Step67: Sorting
Step68: Sorting by values
Step69: Handle NA values via the na_position parameter.
Step70: searchsorted()
Step71: Smallest/largest values
Step72: As of v0.17.0, DataFrame also has the two methods above.
Step73: Sorting by a MultiIndex column
Step74: Copying
Step75: Series also has a dtypes attribute
Step76: If a pandas column contains multiple data types, dtype returns the type able to accommodate all of them; object is the broadest.
Step77: The get_dtype_counts() method returns the number of columns of each dtype in a DataFrame.
Step78: Numeric dtypes propagate between ndarray, Series and DataFrame.
Step79: Default data types
Step80: The exact numeric types in NumPy are platform-dependent.
Step81: upcasting
Step82: The astype() method
Step83: Converting object-typed data
Step84: Selecting columns based on dtype
Step85: select_dtypes() has two parameters, include and exclude: the dtypes of the columns to select and the dtypes to leave out.
Step86: To select string columns, you must use the object dtype.
Step87: If you want to know all the subtypes of a given dtype, say numpy.number, you can define the following function:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
index = pd.date_range('1/1/2000', periods=8)
s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])
df = pd.DataFrame(np.random.randn(8, 3), index=index, columns=['A', 'B', 'C'])
wp = pd.Panel(np.random.randn(2,5,4), items=['Item1', 'Item2'], major_axis=pd.date_range('1/1/2000',periods=5),
minor_axis=['A', 'B', 'C', 'D'])
long_series = pd.Series(np.random.randn(1000))
long_series.head()
long_series.tail(3)
df[:2]
df.columns = [x.lower() for x in df.columns] # reset the column names to lowercase
df
s.values
df.values
type(df.values)
wp.values
df = pd.DataFrame({'one' : pd.Series(np.random.randn(3), index=['a', 'b', 'c']),
'two' : pd.Series(np.random.randn(4), index=['a', 'b', 'c', 'd']),
'three' : pd.Series(np.random.randn(3), index=['b', 'c', 'd'])})
df
row = df.ix[1]
row
column = df['two']
column
df.sub(row, axis='columns')
df.sub(row, axis=1)
df.sub(row, axis='index')
df.sub(row, axis=0)
df
df2 = pd.DataFrame({'one' : pd.Series(np.random.randn(3), index=['a', 'b', 'c']),
'two' : pd.Series(np.random.randn(4), index=['a', 'b', 'c', 'd']),
'three' : pd.Series(np.random.randn(4), index=['a', 'b', 'c', 'd'])})
df2
df + df2
df.add(df2, fill_value=0) # note that ['a', 'three'] is no longer NaN
df.gt(df2)
df2.ne(df)
df>0
(df>0).all() # logical AND down each column
(df > 0).any() # logical OR down each column
(df > 0).any().any()
df.empty
pd.DataFrame(columns=list('ABC')).empty
pd.Series([True]).bool()
pd.Series([False]).bool()
pd.DataFrame([[True]]).bool()
pd.DataFrame([[False]]).bool()
df + df == df*2
(df+df == df*2).all()
np.nan == np.nan
(df+df).equals(df*2)
df1 = pd.DataFrame({'c':['f',0,np.nan]})
df1
df2 = pd.DataFrame({'c':[np.nan, 0, 'f']}, index=[2,1,0])
df2
df1.equals(df2)
df1.equals(df2.sort_index()) # sort df2's index first, then compare
pd.Series(['foo', 'bar', 'baz']) == 'foo'
pd.Index(['foo', 'bar', 'baz']) == 'foo'
pd.Series(['foo', 'bar', 'baz']) == pd.Index(['foo', 'bar', 'qux'])
pd.Series(['foo', 'bar', 'baz']) == np.array(['foo', 'bar', 'qux'])
pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo', 'bar']) # the lengths differ
np.array([1,2,3]) == np.array([2])
np.array([1, 2, 3]) == np.array([1, 2])
df1 = pd.DataFrame({'A' : [1., np.nan, 3., 5., np.nan],
'B' : [np.nan, 2., 3., np.nan, 6.]})
df1
df2 = pd.DataFrame({'A' : [5., 2., 4., np.nan, 3., 7.],
'B' : [np.nan, np.nan, 3., 4., 6., 8.]})
df2
df1.combine_first(df2)
combiner = lambda x,y: np.where(pd.isnull(x), y,x)
df1.combine(df2, combiner)
df
df.mean() # axis=0, the mean of each column
df.mean(1) # the mean of each row
df.sum(0, skipna=False)
df.sum(axis=1, skipna=True)
ts_stand = (df-df.mean())/df.std()
ts_stand.std()
xs_stand = df.sub(df.mean(1), axis=0).div(df.std(1), axis=0)
xs_stand.std(1)
df.cumsum()
series = pd.Series(np.random.randn(500))
series[20:500]=np.nan
series[10:20]=5
series.nunique()
series
series = pd.Series(np.random.randn(1000))
series[::2]=np.nan
series.describe()
frame = pd.DataFrame(np.random.randn(1000, 5), columns=['a', 'b', 'c', 'd', 'e'])
frame.ix[::2]=np.nan
frame.describe()
series.describe(percentiles=[.05, .25, .75, .95])
s = pd.Series(['a', 'a', 'b', 'b', 'a', 'a', np.nan, 'c', 'd', 'a'])
s.describe()
frame = pd.DataFrame({'a':['Yes', 'Yes', 'NO', 'No'], 'b':range(4)})
frame.describe()
frame.describe(include=['object']) # statistics for the non-numeric columns only
frame.describe(include=['number'])
frame.describe(include='all') # note 'all' is a string, not a list
s1 = pd.Series(np.random.randn(5))
s1
s1.idxmin(), s1.idxmax() # indexes of the minimum and the maximum value
df1 = pd.DataFrame(np.random.randn(5,3), columns=list('ABC'))
df1
df1.idxmin(axis=0)
df1.idxmax(axis=1)
df3 = pd.DataFrame([2, 1, 1, 3, np.nan], columns=['A'], index=list('edcba'))
df3
df3['A'].idxmin()
data = np.random.randint(0, 7, size=50)
data
s = pd.Series(data)
s.value_counts()
pd.value_counts(data) # value_counts is also available as a top-level function
s5 = pd.Series([1,1,3,3,3,5,5,7,7,7])
s5.mode()
df5 = pd.DataFrame({"A": np.random.randint(0, 7, size=50),
"B": np.random.randint(-10, 15, size=50)})
df5
df5.mode()
arr = np.random.randn(20)
factor = pd.cut(arr, 4)
factor
factor = pd.cut(arr, [-5, -1, 0, 1, 5]) # pass explicit bin edges
factor
arr = np.random.randn(30)
factor = pd.qcut(arr, [0, .25, .5, .75, 1])
factor
pd.value_counts(factor)
arr = np.random.randn(20)
factor = pd.cut(arr, [-np.inf, 0, np.inf])
factor
# f, g and h are three functions that take a DataFrame and return a DataFrame
f(g(h(df), arg1=1), arg2=2, arg3=3)
(df.pipe(h).pipe(g, arg1=1).pipe(f, arg2=2, arg3=3))
df.apply(np.mean)
df.apply(np.mean, axis=1)
df.apply(lambda x: x.max() - x.min())
df.apply(np.cumsum)
df.apply(np.exp)
tsdf = pd.DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],
index=pd.date_range('1/1/2000', periods=1000))
tsdf
tsdf.apply(lambda x:x.idxmax())
def subtract_and_divide(x, sub, divide=1):
return (x - sub)/divide
df.apply(subtract_and_divide, args=(5,),divide=3)
df
df.apply(pd.Series.interpolate)
df4 = pd.DataFrame(np.random.randn(4, 3),index=['a','b','c','d'],columns=['one', 'two', 'three'])
df4
f = lambda x:len(str(x))
df4['one'].map(f)
df4.applymap(f)
s = pd.Series(['six', 'seven', 'six', 'seven', 'six'], index=['a', 'b', 'c', 'd', 'e'])
t = pd.Series({'six':6., 'seven':7.})
s
t
s.map(t)
s = pd.Series(np.random.randn(5), index=['a','b','c','d','e'])
s
s.reindex(['e', 'b', 'f', 'd'])
df
df.reindex(index=['c', 'f', 'b'], columns=['three', 'two', 'one'])
rs = s.reindex(df.index)
rs
rs.index is df.index
df2 = pd.DataFrame(np.random.randn(3, 2),index=['a','b','c'],columns=['one', 'two'])
df2
df
df.reindex_like(df2)
s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])
s1 = s[:4]
s2 = s[1:]
s1
s2
s1.align(s2)
s1.align(s2, join='inner') #交集是 'b', 'c', 'd'
df
df2 = df.iloc[:5,:2]
df2
df.align(df2, join='inner')
df.align(df2)
df.align(df2, join='inner', axis=0)
df.align(df2.ix[0], axis=1)
rng = pd.date_range('1/3/2000', periods=8)
ts = pd.Series(np.random.randn(8), index=rng)
ts2 = ts[[0, 3, 6]]
ts
ts2
ts2.reindex(ts.index)
ts2.reindex(ts.index, method='ffill') # fill NaN forward from the preceding index value
ts2.reindex(ts.index, method='bfill') # fill NaN backward from the next non-NaN value
ts2.reindex(ts.index, method='nearest')
ts2.reindex(ts.index).fillna(method='ffill')
ts2.reindex(ts.index, method='ffill', limit=1)
ts2
ts
ts2.reindex(ts.index, method='ffill', tolerance='1 day')
df
df.drop(['a'], axis=0) # drop the row labelled 'a'
s
s.rename(str.upper)
df = pd.DataFrame({'one' : pd.Series(np.random.randn(3), index=['a', 'b', 'c']),
'two' : pd.Series(np.random.randn(4), index=['a', 'b', 'c', 'd']),
'three' : pd.Series(np.random.randn(3), index=['b', 'c', 'd'])})
df
df.rename(columns={'one' : 'foo', 'two' : 'bar'},
index={'a' : 'apple', 'b' : 'banana', 'd' : 'durian'})
s.rename('sclar-name')
df = pd.DataFrame({'col1' : np.random.randn(3), 'col2' : np.random.randn(3)},
index=['a', 'b', 'c'])
df
for col in df: # iteration yields the column names
print col
for item, frame in df.iteritems():
print item, frame
for row_index, row in df.iterrows():
print row_index, row
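# Added note: iterrows() materializes each row as a Series, so mixed-dtype
# frames get upcast row by row; itertuples() below is faster and keeps dtypes.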
for row in df.itertuples():
print row
s = pd.Series(pd.date_range('20160101 09:10:12', periods=4))
s
s.dt.hour
s.dt.second
s.dt.day
s = pd.Series(pd.date_range('20130101', periods=4))
s
s.dt.strftime('%Y/%m/%d')
s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'])
s
s.str.lower()
unsorted_df = df.reindex(index=['a', 'd', 'c', 'b'],
columns=['three', 'two', 'one'])
unsorted_df
unsorted_df.sort_index()
unsorted_df.sort_index(ascending=False)
unsorted_df.sort_index(axis=1)
unsorted_df['three'].sort_index() # Series
df1 = pd.DataFrame({'one':[2,1,1,1],'two':[1,3,2,4],'three':[5,4,3,2]})
df1
df1.sort_values(by='two')
df1.sort_values(by=['one', 'two'])
s[2] = np.nan
s.sort_values()
s.sort_values(na_position='first') # place the NAs first
ser = pd.Series([1,2,3])
ser.searchsorted([0, 3]) # 0 would insert at position 0 and 3 at position 2; the queries are independent, so 3 maps to 2 rather than 3.
ser.searchsorted([0, 4])
ser.searchsorted([1, 3], side='right')
ser.searchsorted([1, 3], side='left')
ser = pd.Series([3, 1, 2])
ser.searchsorted([0, 3], sorter=np.argsort(ser))
s = pd.Series(np.random.permutation(10))
s
s.sort_values()
s.nsmallest(3)
s.nlargest(3)
df = pd.DataFrame({'a': [-2, -1, 1, 10, 8, 11, -1],
'b': list('abdceff'),
'c': [1.0, 2.0, 4.0, 3.2, np.nan, 3.0, 4.0]})
df
df.nlargest(5, 'a') # the 5 largest values in column 'a'
df.nlargest(5, ['a', 'c'])
df.nsmallest(3, 'a')
df.nsmallest(5, ['a', 'c'])
df1 = pd.DataFrame({'one':[2,1,1,1],'two':[1,3,2,4],'three':[5,4,3,2]})
df1.columns = pd.MultiIndex.from_tuples([('a','one'),('a','two'),('b','three')])
df1
df1.sort_values(by=('a','two'))
df = pd.DataFrame(dict(A = np.random.rand(3),
B = 1,
C = 'foo',
D = pd.Timestamp('20010102'),
E = pd.Series([1.0]*3).astype('float32'),
F = False,
G = pd.Series([1]*3,dtype='int8')))
df
df.dtypes
df['A'].dtype
pd.Series([1,2,3,4,5,6.])
pd.Series([1,2,3,6.,'foo'])
df.get_dtype_counts()
df1 = pd.DataFrame(np.random.randn(8, 1), columns=['A'], dtype='float32')
df1
df1.dtypes
df2 = pd.DataFrame(dict( A = pd.Series(np.random.randn(8), dtype='float16'),
B = pd.Series(np.random.randn(8)),
                         C = pd.Series(np.array(np.random.randn(8), dtype='uint8')) )) # note the float16 and uint8 dtypes here
df2
df2.dtypes
pd.DataFrame([1, 2], columns=['a']).dtypes
pd.DataFrame({'a': [1, 2]}).dtypes
pd.DataFrame({'a': 1}, index=list(range(2))).dtypes
frame = pd.DataFrame(np.array([1, 2])) # on a 32-bit system the dtype would be int32
df1.dtypes
df2.dtypes
df1.reindex_like(df2).fillna(value=0.0).dtypes
df3 = df1.reindex_like(df2).fillna(value=0.0) + df2
df3.dtypes
df3
df3.dtypes
df3.astype('float32').dtypes
df3['D'] = '1.'
df3['E'] = '1'
df3
df3.dtypes # the 'D' and 'E' columns are now object dtype
df3.convert_objects(convert_numeric=True).dtypes
df3['D'] = df3['D'].astype('float16')
df3['E'] = df3['E'].astype('int32')
df3.dtypes
df = pd.DataFrame({'string': list('abc'),
'int64': list(range(1, 4)),
'uint8': np.arange(3, 6).astype('u1'),
'float64': np.arange(4.0, 7.0),
'bool1': [True, False, True],
'bool2': [False, True, False],
'dates': pd.date_range('now', periods=3).values,
'category': pd.Series(list("ABC")).astype('category')})
df
df['tdeltas'] = df.dates.diff()
df['uint64'] = np.arange(3, 6).astype('u8')
df['other_dates'] = pd.date_range('20130101', periods=3).values
df['tz_aware_dates'] = pd.date_range('20130101', periods=3, tz='US/Eastern')
df
df.dtypes
df.select_dtypes(include=[bool])
df.select_dtypes(include=['bool'])
df.dtypes
df.select_dtypes(include=['number', 'bool'], exclude=['unsignedinteger'])
df.select_dtypes(include=['object'])
def subdtypes(dtype):
subs = dtype.__subclasses__()
if not subs:
return dtype
return [dtype, [subdtypes(dt) for dt in subs]]
subdtypes(np.generic)
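# For instance (an added example), just the floating-point branch:
subdtypes(np.floating)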
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Supply input data and set some plotting parameters.
Step2: Set up some characteristics for plotting.
Step3: Read in the flight and radar data
Step4: Make a cross-section following the flight track displayed in the top panel and use the vertical wind velocity field.
Step5: Now let's make a vertical cross-section along lon/lat pairs of reflectivity
Step6: Here's an alternative method to produce the same plot above. And notice the second plot has discrete levels by setting the discrete_cmap_levels keyword.
|
<ASSISTANT_TASK:>
Python Code:
# Load the needed packages
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import awot
from awot.graph.common import create_basemap
from awot.graph import RadarHorizontalPlot, RadarVerticalPlot, FlightLevel
%matplotlib inline
# Set the project name
Project="DYNAMO"
# Choose what file to process
yymmdd, modn = '111124', '0351'
# Set the data directory
fDir = "/Users/guy/data/dynamo/"+yymmdd+"I/"
# Construct the full path name for windsyn NetCDF file
P3Radf = str(glob(fDir+"/*"+modn+"*windsyn*.nc")).strip('[]')
# Construct the full path name for Flight level NetCDF file
FltLevf = str(glob(fDir+"20*"+yymmdd+"*_DJ*.nc")).strip('[]')
corners = [77.8, -2.0, 79.6, -0.2]
figtitle = '24 Nov RCE'
# Set up some characteristics for plotting
# Set map projection to use
proj = 'cea'
Wbarb_Spacing = 300 # Spacing of wind barbs along flight path (sec)
# Choose the X-axis time step (in seconds) where major labels will be
XlabStride = 60
# Optional settings
start_time = "2011-11-24 03:51:00"
end_time = "2011-11-24 04:57:00"
# Map spacing
dLon = 0.5
dLat = 0.5
# Should landmarks be plotted? [If yes, then modify the section below]
Lmarks=True
if Lmarks:
# Create a list of Landmark data
LocMark = []
# Add locations as [ StringName, Longitude, Latitude ,XlabelOffset, YlabelOffset]
LocMark.append(['Diego Garcia', 72.4160, -7.3117, 0.1, -0.6])
LocMark.append(['R/V Revelle', 80.5010, 0.12167, -0.4, -0.6])
LocMark.append(['Gan', 73.1017, -0.6308, -0.9, 0.0])
LocMark.append(['R/V Marai', 80.50, -7.98, -0.1, -0.6])
# Build a few variables for plotting the labels
# Build arrays for plotting
Labels = []
LabLons = []
LabLats = []
XOffset = []
YOffset = []
for L1, L2, L3, L4, L5 in LocMark:
Labels.append(L1)
LabLons.append(L2)
LabLats.append(L3)
XOffset.append(L4)
YOffset.append(L5)
# Add PPI plot at 2 km level
cappi_ht = 2000.
fl1 = awot.io.read_netcdf(fname=FltLevf[1:-1], platform='p-3')
r1 = awot.io.read_windsyn_tdr_netcdf(fname=P3Radf[1:-1], field_mapping=None)
fig, (axPPI, axXS) = plt.subplots(2, 1, figsize=(8, 8))
# Set the map for plotting
bm1 = create_basemap(corners=corners, proj=proj, resolution='l', area_thresh=1.,
lat_spacing=dLat, lon_spacing=dLon, ax=axPPI)
# Create a Flightlevel instance for the track
flp1 = FlightLevel(fl1, basemap=bm1)
flp1.plot_trackmap(start_time=start_time, end_time=end_time,
min_altitude=50., max_altitude= 8000.,
addlegend=False, addtitle=False, ax=axPPI)
# Create a RadarGrid
rgp1 = RadarHorizontalPlot(r1, basemap=bm1)
rgp1.plot_cappi('reflectivity', cappi_ht, vmin=15., vmax=60., title=' ',
#rgp1.plot_cappi('Uwind', 2., vmin=-20., vmax=20., title=' ',
# cmap='RdBu_r',
color_bar=True, cb_pad="10%", cb_loc='right', cb_tick_int=4,
ax=axPPI)
rgp1.overlay_wind_vector(height_level=cappi_ht, vscale=200, vtrim=6, qcolor='0.50',
refUposX=.75, refUposY=.97, plot_km=True)
flp1.plot_radar_cross_section(r1, 'Wwind', plot_km=True,
start_time=start_time, end_time=end_time,
vmin=-3., vmax=3., title=' ',
cmap='RdBu_r',
color_bar=True, cb_orient='vertical', cb_tick_int=4,
x_axis_array='time',
ax=axXS)
fig, (axPPI2, axXS2) = plt.subplots(2, 1, figsize=(7, 7))
# Set the map for plotting
bm2 = create_basemap(corners=corners, proj=proj, resolution='l', area_thresh=1.,
lat_spacing=dLat, lon_spacing=dLon, ax=axPPI2)
# Create a Flightlevel instance for the track
flp2 = FlightLevel(fl1, basemap=bm2)
flp2.plot_trackmap(start_time=start_time, end_time=end_time,
min_altitude=50., max_altitude= 8000.,
addlegend=False, addtitle=False, ax=axPPI2)
# Create a RadarGrid
rgph = RadarHorizontalPlot(r1, basemap=bm2)
# Add PPI plot at 2 km
rgph.plot_cappi('reflectivity', cappi_ht, vmin=15., vmax=60., title=' ',
color_bar=True, cb_pad="10%", cb_loc='right', cb_tick_int=4,
ax=axPPI2)
rgph.overlay_wind_vector(height_level=2., vscale=200, vtrim=6, qcolor='0.50')
# Add Cross-sectional line to horizontal plot
rgph.plot_line_geo([78.3, 79.0], [-1.1, -1.5], lw=4, alpha=.8, line_style='w-',
label0=True, label_offset=(0.05,-0.05))
rgph.plot_cross_section('reflectivity', (78.3, -1.1), (79.0, -1.5),
vmin=15., vmax=60., title=' ',
color_bar=True, cb_orient='vertical', cb_tick_int=4,
plot_km=True, ax=axXS2)
# Alternatively the commented out code below will also display the plot
#rgpv = RadarVerticalPlot(fl1, instrument='tdr_grid')
# Add the cross-section along those coordinates
#rgpv.plot_cross_section('dBZ', (78.3, -1.1), (79.0, -1.5),
# vmin=15., vmax=60., title=' ',
# color_bar=False, cb_orient='vertical', cb_tick_int=4,
# ax=axXS)
fig, (axPPI3, axXS3) = plt.subplots(2, 1, figsize=(7, 7))
# Set the map for plotting
bm3 = create_basemap(corners=corners, proj=proj, resolution='l', area_thresh=1.,
lat_spacing=dLat, lon_spacing=dLon, ax=axPPI3)
# Create a Flightlevel instance for the track
flp2 = FlightLevel(fl1, basemap=bm3)
flp2.plot_trackmap(start_time=start_time, end_time=end_time,
min_altitude=50., max_altitude= 8000.,
addlegend=False, addtitle=False, ax=axPPI3)
# Create a RadarGrid
rgph = RadarHorizontalPlot(r1, basemap=bm3)
# Add PPI plot at 2 km level
rgph.plot_cappi('reflectivity', cappi_ht, vmin=15., vmax=60., title=' ',
color_bar=True, cb_pad="10%", cb_loc='right', cb_tick_int=4,
ax=axPPI3)
rgpv = RadarVerticalPlot(r1, basemap=bm3)
# Add the cross-section along those coordinates
rgpv.plot_cross_section('reflectivity', (78.3, -1.1), (79.0, -1.5),
vmin=15., vmax=60., title=' ',
color_bar=True, cb_orient='vertical', cb_tick_int=4,
discrete_cmap_levels=[10., 15., 20., 25., 30., 35., 40., 45., 50., 55., 60.], ax=axXS3)
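# (The explicit level list above could equally be generated with numpy;
#  np.arange(10., 65., 5.) produces the same 10-60 dBZ steps.)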
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Interactive help
Step2: We can explore the rest of the pandas library's functionality by looking at the methods pandas provides. To do this we type pd. and use Jupyter's built-in autocompletion via the Tab key to see which methods pandas offers. If we then move down with the arrow key to, e.g., Categorical, press Enter and then Shift+Tab, the signature of the corresponding functionality and an excerpt of the help documentation appear. Pressing Shift+Tab twice shows the full help.
Step3: Loading data
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
pd?
pd.Categorical
cdr = pd.read_csv('data/CDR_data.csv')
cdr.head()
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
matplotlib.style.use('ggplot') # Look Pretty
# the cells below refer to `df`; assuming it is the CDR table loaded above
df = cdr
df.info()
df.CallTimestamp = pd.to_datetime(df.CallTimestamp)
df.Duration = pd.to_timedelta(df.Duration)
df.info()
phoneowners = pd.read_excel("data/phoneowners.xlsx")
phoneowners.head()
phoneowners.info()
#Join them
phoneowners.columns = ['Name', 'In']
joined = pd.merge(df, phoneowners, on='In')
joined.head()
#But where are those towers?
#No for the tower data
towers = pd.read_csv("data/towers.csv")
joined = pd.merge(joined, towers, on='TowerID')
joined.head()
# result = pd.concat([df1, df4], axis=1, join_axes=[df1.index])
joined['CallTimestamp'] = pd.to_datetime(joined['CallTimestamp'])
Workweek = [0, 1, 2, 3, 4]  # Monday-Friday (pandas dayofweek codes); not defined elsewhere in this notebook
weekdays = joined['CallTimestamp'].dt.dayofweek.isin(Workweek) & joined['CallTimestamp'].dt.hour.isin(range(8,18))
dfweekdays = joined[weekdays]
dfweekdays.head()
targetname = 'John Doe'
user1 = joined[joined['Name'] == targetname]
#user1 = user1[weekdays]
user1 = user1.reset_index(drop=True)
user1.head()
# INFO: Plot all the call locations
%matplotlib inline
#user1[['TowerLon', 'TowerLat']].plot.scatter()
user1.plot.scatter(x='TowerLon', y='TowerLat', c='purple', alpha=0.12, title='Call Locations', s = 30)
#showandtell() # Comment this line out when you're ready to proceed
#
# INFO: The locations map above should be too "busy" to really wrap your head around. This is where domain expertise comes into play.
# Your intuition tells you that people are likely to behave differently on weekends:
#
# On Weekends:
# 1. People probably don't go into work
# 2. They probably sleep in late on Saturday
# 3. They probably run a bunch of random errands, since they couldn't during the week
# 4. They should be home, at least during the very late hours, e.g. 1-4 AM
#
# On Weekdays:
# 1. People probably are at work during normal working hours
# 2. They probably are at home in the early morning and during the late night
# 3. They probably spend time commuting between work and home everyday
#
# TODO: Add more filters to the user1 slice you created. Add bitwise logic so that you're only examining records that came in on
# weekends (sat/sun).
#
# .. your code here ..
user1['DOW'] = user1.CallTimestamp.dt.strftime("%a")
user1 = user1[(user1.DOW == 'Sat') | (user1.DOW == 'Sun')]
user1.head()
#
# TODO: Further filter it down for calls that are came in either before 6AM OR after 10pm (22:00:00). You can use < and > to compare
# the string times, just make sure you code them as military time strings, eg: "06:00:00", "22:00:00":
# https://en.wikipedia.org/wiki/24-hour_clock
#
# You might also want to review the Data Manipulation section for this. Once you have your filtered slice, print out its length:
#
# .. your code here ..
user1 = user1[(user1.CallTimestamp < "06:00:00") | (user1.CallTimestamp > "22:00:00")]
user1.head()
#
# INFO: Visualize the dataframe with a scatter plot as a sanity check. Since you're familiar with maps, you know well that your
# X-Coordinate should be Longitude, and your Y coordinate should be the tower Latitude. Check the dataset headers for proper column
# feature names.
# https://en.wikipedia.org/wiki/Geographic_coordinate_system#Geographic_latitude_and_longitude
#
# At this point, you don't yet know exactly where the user is located just based off the cell phone tower position data; but
# considering the below are for Calls that arrived in the twilight hours of weekends, it's likely that wherever they are bunched up
# is probably near the caller's residence:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(user1.TowerLon,user1.TowerLat, c='g', marker='o', alpha=0.2)
ax.set_title('Weekend Calls (<6am or >10p)')
#showandtell() # TODO: Comment this line out when you're ready to proceed
#
# TODO: Run K-Means with a K=1. There really should only be a single area of concentration. If you notice multiple areas that are
# "hot" (multiple areas the usr spends a lot of time at that are FAR apart from one another), then increase K=2, with the goal being
# that one of the centroids will sweep up the annoying outliers; and the other will zero in on the user's approximate home location.
# Or rather the location of the cell tower closest to their home.....
#
# Be sure to only feed in Lat and Lon coordinates to the KMeans algo, since none of the other data is suitable for your purposes.
# Since both Lat and Lon are (approximately) on the same scale, no feature scaling is required. Print out the centroid locations and
# add them onto your scatter plot. Use a distinguishable marker and color.
#
# Hint: Make sure you graph the CORRECT coordinates. This is part of your domain expertise.
#
# .. your code here ..
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters = 2)
user1 = pd.concat([user1.TowerLon, user1.TowerLat], axis = 1)
labels = kmeans.fit_predict(user1)
centroids = kmeans.cluster_centers_
ax.scatter(x = centroids[:, 0], y = centroids[:, 1], c = 'r', marker = 'x', s = 100)
ax.figure
#showandtell() # TODO: Comment this line out when you're ready to proceed
#coordinates = "" + centroids[0][1].to_string().split('.')[0] + "°" + centroids[0][1].split('.')[1][0] + centroids[0][1].split('.')[1][1] + centroids[0][0]
#str.split(' ', 1 )
centroids
difference1 = centroids[0][1] - centroids[1][1]
difference2 = centroids[0][0] - centroids[1][0]
difference1 = 0.5 * difference1
difference2 = 0.5 * difference2
coordinate1 = centroids[0][1] + difference1
coordinate2 = centroids[0][0] + difference2
coordinates = str(coordinate1) + " " + str(coordinate2)
coordinates
#-96°90'92.4672"N 96°56'57.3"W
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Network Architecture
Step2: Training
Step3: Denoising
Step4: Checking out the performance
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
tf.reset_default_graph()
learning_rate = 0.001
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
conv1 = tf.layers.conv2d(inputs_, 16, (3,3), padding="same", activation=tf.nn.relu, name='conv1')
# Now 28x28x16
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), 2, padding="same", name='maxpool1')
# Now 14x14x16
conv2 = tf.layers.conv2d(maxpool1, 8, (3,3), padding="same", activation=tf.nn.relu, name='conv2')
# Now 14x14x8
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), 2, padding="same", name='maxpool2')
# Now 7x7x8
conv3 = tf.layers.conv2d(maxpool2, 8, (3,3), padding="same", activation=tf.nn.relu, name='conv3')
# Now 7x7x8
encoded = tf.layers.max_pooling2d(conv3, (2,2), 2, padding="same", name='encoded')
# Now 4x4x8
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, [7,7], name='upsample1')
# Now 7x7x8
conv4 = tf.layers.conv2d(upsample1, 8, (3,3), padding="same", activation=tf.nn.relu, name='conv4')
# Now 7x7x8
upsample2 = tf.image.resize_nearest_neighbor(conv4, [14,14], name='upsample2')
# Now 14x14x8
conv5 = tf.layers.conv2d(upsample2, 8, (3,3), padding="same", activation=tf.nn.relu, name='conv5')
# Now 14x14x8
upsample3 = tf.image.resize_nearest_neighbor(conv5, [28,28], name='upsample3')
# Now 28x28x8
conv6 = tf.layers.conv2d(upsample3, 16, (3,3), padding="same", activation=tf.nn.relu, name='conv6')
# Now 28x28x16
logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None, name='conv-logits')
#Now 28x28x1
print('MODEL:')
print(conv1)
print(maxpool1)
print(conv2)
print(maxpool2)
print(conv3)
print(encoded)
print(upsample1)
print(conv4)
print(upsample2)
print(conv5)
print(upsample3)
print(conv6)
print(logits)
print()
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits)
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
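# Note: the optimizer works on the raw logits because
# tf.nn.sigmoid_cross_entropy_with_logits fuses the sigmoid and the
# cross-entropy into a single numerically stable op; `decoded` above is
# only used later for visualization.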
sess = tf.Session()
epochs = 20
batch_size = 200
show_after = 50
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
imgs = batch[0].reshape((-1, 28, 28, 1))
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs,
targets_: imgs})
if ii % show_after == 0:
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
tf.reset_default_graph()
learning_rate = 0.001
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
conv1 = tf.layers.conv2d(inputs_, 32, (3,3), padding="same", activation=tf.nn.relu, name='conv1')
# Now 28x28x32
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), 2, padding="same", name='maxpool1')
# Now 14x14x32
conv2 = tf.layers.conv2d(maxpool1, 32, (3,3), padding="same", activation=tf.nn.relu, name='conv2')
# Now 14x14x32
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), 2, padding="same", name='maxpool2')
# Now 7x7x32
conv3 = tf.layers.conv2d(maxpool2, 16, (3,3), padding="same", activation=tf.nn.relu, name='conv3')
# Now 7x7x16
encoded = tf.layers.max_pooling2d(conv3, (2,2), 2, padding="same", name='encoded')
# Now 4x4x16
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, [7,7], name='upsample1')
# Now 7x7x16
conv4 = tf.layers.conv2d(upsample1, 16, (3,3), padding="same", activation=tf.nn.relu, name='conv4')
# Now 7x7x16
upsample2 = tf.image.resize_nearest_neighbor(conv4, [14,14], name='upsample2')
# Now 14x14x16
conv5 = tf.layers.conv2d(upsample2, 32, (3,3), padding="same", activation=tf.nn.relu, name='conv5')
# Now 14x14x32
upsample3 = tf.image.resize_nearest_neighbor(conv5, [28,28], name='upsample3')
# Now 28x28x32
conv6 = tf.layers.conv2d(upsample3, 32, (3,3), padding="same", activation=tf.nn.relu, name='conv6')
# Now 28x28x32
logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None, name='conv-logits')
#Now 28x28x1
print('MODEL:')
print(conv1)
print(maxpool1)
print(conv2)
print(maxpool2)
print(conv3)
print(encoded)
print(upsample1)
print(conv4)
print(upsample2)
print(conv5)
print(upsample3)
print(conv6)
print(logits)
print()
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits)
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
epochs = 100
batch_size = 200
show_after = 150
# Sets how much noise we're adding to the MNIST images
noise_factor = 0.5
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images from the batch
imgs = batch[0].reshape((-1, 28, 28, 1))
# Add random noise to the input images
noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)
# Clip the images to be between 0 and 1
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
# Noisy images as inputs, original images as targets
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,
targets_: imgs})
if ii % show_after == 0:
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
reconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([noisy_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create the Streaming context, with the configuration
Step2: Once the contexts are instantiated, we connect to the data source
Step3: Support for window operations and tuples for hashtag counting
Step4: We create a namedtuple object to store the hashtags and their counts.
Step5: Process the data stream
Step6: Start the Spark Streaming process
Step7: Plot the trends
|
<ASSISTANT_TASK:>
Python Code:
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import SQLContext
from pyspark.sql.functions import desc
from collections import namedtuple
# Namedtuple: https://pymotw.com/2/collections/namedtuple.html, http://stackoverflow.com/questions/2970608/what-are-named-tuples-in-python
import time
import matplotlib.pyplot as plt
import seaborn as sn
%matplotlib inline
from IPython import display
sc = SparkContext("local[2]", "TwitterTrend")
ssc = StreamingContext(sc, 10)
sqlContext = SQLContext(sc)
socket_stream = ssc.socketTextStream("localhost", 5555)
lines = socket_stream.window( 20 )
fields = ("tag", "count" )
Tweet = namedtuple( 'Tweet', fields )
( lines.flatMap( lambda text: text.split( " " ) )
.filter( lambda word: word.lower().startswith("#") )
.map( lambda word: ( word.lower(), 1 ) )
.reduceByKey( lambda a, b: a + b )
.map( lambda rec: Tweet( rec[0], rec[1] ) )
.foreachRDD( lambda rdd: rdd.toDF().sort( desc("count") )
.limit(10).registerTempTable("tweets") ) )
# Lazy evaluation
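# An equivalent formulation (a sketch, not part of the original pipeline):
# reduceByKeyAndWindow applies the window to the keyed counts instead of
# windowing the raw text stream first. Left commented out so only one
# pipeline writes the "tweets" table.
# ( socket_stream.flatMap( lambda text: text.split( " " ) )
#   .filter( lambda word: word.lower().startswith("#") )
#   .map( lambda word: ( word.lower(), 1 ) )
#   .reduceByKeyAndWindow( lambda a, b: a + b, None, 20, 10 ) )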
ssc.start()
for i in range(0,1000):
time.sleep( 1 )
top_10_tweets = sqlContext.sql( 'Select tag, count from tweets' )
top_10_df = top_10_tweets.toPandas()
display.clear_output(wait=True)
plt.figure( figsize = ( 10, 8 ) )
sn.barplot( x="count", y="tag", data=top_10_df)
plt.show()
ssc.stop()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: One queue or two?
Step3: Test this function by creating a System object with lam=1/8 and mu=1/5.
Step5: Write an update function that takes as parameters x, which is the total number of customers in the store, including the one checking out; t, which is the number of minutes that have elapsed in the simulation; and system, which is a System object.
Step6: Test your function by calling it with x=1, t=0, and the System object you created. If you run it a few times, you should see different results.
Step8: Now we can run the simulation. Here's a version of run_simulation that creates a TimeSeries with the total number of customers in the store, including the one checking out.
Step9: Call run_simulation with your update function and plot the results.
Step11: After the simulation, we can compute L, which is the average number of customers in the system, and W, which is the average time customers spend in the store. L and W are related by Little's Law: $L = \lambda W$.
Step12: Call compute_metrics with the results from your simulation.
Step13: Parameter sweep
Step15: Write a function that takes an array of values for lam, a single value for mu, and an update function.
Step16: Call your function to generate a SweepSeries, and plot it.
Step17: If we imagine that this range of values represents arrival rates on different days, we can use the average value of W, for a range of values of lam, to compare different queueing strategies.
Step19: Analysis
Step20: Use this function to plot the theoretical results, then plot your simulation results again on the same graph. How do they compare?
Step22: Multiple servers
Step23: Use this update function to simulate the system, plot the results, and print the metrics.
Step24: Since we have two checkout counters now, we can consider values for $\lambda$ that exceed $\mu$.
Step25: Use your sweep function to simulate the two server, one queue scenario with a range of values for lam.
Step27: Multiple queues
Step29: Write a version of run_simulation that works with this update function.
Step30: Test your functions by running a simulation with a single value of lam.
Step32: Sweep a range of values for lam, plot the results, and print the average wait time across all values of lam.
|
<ASSISTANT_TASK:>
Python Code:
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
# set the random number generator
np.random.seed(7)
# Solution
def make_system(lam, mu):
Make a System object.
lam: arrival rate, per minute
mu: service completion rate, per minute
returns: System object
# duration is 10 hours, expressed in minutes
return System(lam=lam, mu=mu, duration=10*60)
# Solution
interarrival_time = 8
service_time = 5
lam = 1 / interarrival_time
mu = 1 / service_time
system = make_system(lam, mu)
# Solution
def update_func1(x, t, system):
Simulate one time step.
x: number of people in the shop
t: time step
system: System object
# if there's a customer in service, check if they're done
if x > 0:
if flip(system.mu):
x -= 1
# check for an arrival
if flip(system.lam):
x += 1
return x
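# `flip` above comes from the modsim library; a minimal stand-in with the
# same behavior (True with probability p) would look like this -- left
# commented out so the library version is not shadowed:
#
# def flip(p):
#     return np.random.random() < p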
# Solution
update_func1(1, 0, system)
def run_simulation(system, update_func):
Simulate a queueing system.
system: System object
update_func: function object
x = 0
results = TimeSeries()
results[0] = x
for t in linrange(0, system.duration):
x = update_func(x, t, system)
results[t+1] = x
return results
# Solution
results = run_simulation(system, update_func1)
plot(results)
decorate(xlabel='Time (min)', ylabel='Customers')
def compute_metrics(results, system):
Compute average number of customers and wait time.
results: TimeSeries of queue lengths
system: System object
returns: L, W
L = results.mean()
W = L / system.lam
return L, W
# Solution
compute_metrics(results, system)
# Solution
num_vals = 101
lam_array = linspace(0.1*mu, 0.8*mu, num_vals)
lam_array
# Solution
def sweep_lam(lam_array, mu, update_func):
Run simulations with a range of values for `lam`
lam_array: array of values for `lam`
mu: probability of finishing a checkout
update_func: passed along to run_simulation
returns: SweepSeries of average wait time vs lam
sweep = SweepSeries()
for lam in lam_array:
system = make_system(lam, mu)
results = run_simulation(system, update_func)
L, W = compute_metrics(results, system)
sweep[lam] = W
return sweep
# Solution
sweep = sweep_lam(lam_array, mu, update_func1)
# Solution
plot(sweep, 'bo')
decorate(xlabel='Arrival late, lambda (per min)',
ylabel='Average time in system',
title='Single server, single queue')
# W_avg = sweep.mean()
def plot_W(lam_array, mu):
Plot the theoretical mean wait time.
lam_array: array of values for `lam`
mu: probability of finishing a checkout
W = 1 / (mu - lam_array)
plot(lam_array, W, 'g-')
# Solution
plot_W(lam_array, mu)
plot(sweep, 'bo')
decorate(xlabel='Arrival late, lambda (per min)',
ylabel='Average time in system',
title='Single server, single queue')
# Solution
def update_func2(x, t, system):
Simulate a single queue with two servers.
system: System object
# if both servers are busy, check whether the
# second is complete
if x > 1 and flip(system.mu):
x -= 1
# check whether the first is complete
if x > 0 and flip(system.mu):
x -= 1
# check for an arrival
if flip(system.lam):
x += 1
return x
# Solution
system = make_system(lam, mu)
results = run_simulation(system, update_func2)
plot(results)
decorate(xlabel='Time (min)', ylabel='Customers')
compute_metrics(results, system)
# Solution
lam_array = linspace(0.1*mu, 1.6*mu, num_vals)
# Solution
sweep = sweep_lam(lam_array, mu, update_func2)
W_avg = sweep.mean()
print('Average of averages = ', W_avg, 'minutes')
# Solution
plot(sweep, 'bo')
decorate(xlabel='Arrival late, lambda (per min)',
ylabel='Average time in system',
title='Multiple server, single queue')
# Solution
def update_func3(x1, x2, t, system):
Simulate two queues with one server each.
x1: number of customers in queue 1
x2: number of customers in queue 2
t: time step
system: System object
# if the first servers is busy, check it it's done
if x1 > 0 and flip(system.mu):
x1 -= 1
# if the second queue is busy, check if it's done
if x2 > 0 and flip(system.mu):
x2 -= 1
# check for an arrival
if flip(system.lam):
# join whichever queue is shorter
if x1 < x2:
x1 += 1
else:
x2 += 1
return x1, x2
# Solution
def run_simulation(system, update_func):
Simulate a queueing system.
system: System object
update_func: function object
x1, x2 = 0, 0
results = TimeSeries()
results[0] = x1 + x2
for t in linrange(0, system.duration):
x1, x2 = update_func(x1, x2, t, system)
results[t+1] = x1 + x2
return results
# Solution
system = make_system(lam, mu)
results = run_simulation(system, update_func3)
plot(results)
decorate(xlabel='Time (min)', ylabel='Customers')
compute_metrics(results, system)
# Solution
sweep = sweep_lam(lam_array, mu, update_func3)
W_avg = sweep.mean()
print('Average of averages = ', W_avg, 'minutes')
# Solution
plot(sweep, 'bo')
decorate(xlabel='Arrival late, lambda (per min)',
ylabel='Average time in system',
title='Multiple server, multiple queue')
# Solution
With two queues, the average of averages is slightly higher, most of the time. But the difference is small.
The two configurations are equally good as long as both servers are busy; the only time two lines are worse is when one queue is empty and the other contains more than one customer. In real life, if we allow customers to change lanes, that disadvantage can be eliminated.
From a theoretical point of view, one line is better. From a practical point of view, the difference is small and can be mitigated. So the best choice depends on practical considerations.
On the other hand, you can do substantially better with an express line for customers with short service times. But that's a topic for another notebook.
;
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's examine some statistical values of the elevations (e.g. mean, min, max, standard deviation)
Step2: The mean error (standard deviation) looks very promising (7 mm), but the range of the values is ~6 cm. On the other hand we have only 86362 epochs, instead of 86400 (24 * 60 * 60).
Step3: Let's have a look at the data.
Step4: We can see the data are noisy and there may be some periodicity in them. First let's smooth the data with the median of a moving 60-second window, to remove random noise.
Step5: Before we go further analysing the data, find and fill the gaps in the data set.
Step6: We have two gaps (13 seconds and 27 seconds). Using the indices (24781 and 35908) we can find the times of the gaps
Step7: For further processing we have to fill these gaps. These two gaps are shorter than a minute, so we may use linear interpolation.
Step8: Let's display the first 5 minutes of original and smoothed data. Note that the first minute is missing from the smoothed data.
Step9: Now we start the spectral analysis of the elevation time series. First we remove the linear trend, if there is any.
Step10: Let's try to find the significant frequencies in the data using the FFT. We'll convert the data from the time domain to the frequency domain.
Step11: Let's display frequency space.
Step12: And the inverse FFT for the 10 most significant frequencies. Change the start and end variables in the next section of code to zoom and pan the plots. Turn original and smoothed data on/off in the plot by commenting out the appropriate ax.plot line.
Step13: Do the two functions match? It is far from a perfect match. How close are they? Let's calculate the root mean square error (RMSE).
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
names=['dt', 'east', 'north', 'elev', 'code'] # column names in csv input file
data = pd.read_csv('https://raw.githubusercontent.com/OSGeoLabBp/tutorials/master/english/data_processing/lessons/code/one_day.csv',
sep=',', names=names, parse_dates=['dt'])
data.head
data['elev'].describe()
from matplotlib import pyplot as plt
fig = plt.figure()
ax = data['elev'].plot.kde()
ax.set_title('Density function')
avg = data['elev'].mean()
median = data['elev'].median()
std = data['elev'].std()
ax.plot([avg - 3 * std, avg - 3 * std], [0, 50], [avg + 3 *std, avg + 3 * std], [0, 50])
print('Skewness: {:.3f}, Mean - Median: {:.3f}'.format(data['elev'].skew(), avg - median))
fig = plt.figure()
ax = fig.add_axes([0, 0, 4., 0.5]) # dimensions of the chart window
ax.plot(data['dt'], data['elev'])
ax.set_title('Elevations')
win = 60 # window width for smoothing
data['smooth_elev'] = pd.DataFrame(data[['elev']].rolling(window=win, center=True).median())
# next command does the same as rolling but much slower
#data['test'] = [(data['elev'][i-win//2:i+win//2]).median() if i >=30 and i < data.shape[0]-win/2 else avg for i in data.index]
from datetime import timedelta
deltas = data['dt'].diff()[1:] # calculate difference between adjecent time stamps
dd = deltas[deltas > timedelta(seconds=1)] # select differences larger 1 second
for i in dd.index:
print(data.iloc[[i-1,i]])
data['diff'] = data['dt'] - data['dt'].min() # calculate time difference from the first epoch
data['seconds'] = data['diff'].dt.total_seconds() # time difference in seconds from the first epoch
x_orig = data['seconds'].to_numpy() # convert pandas data series to numpy array
y_orig = data['smooth_elev'].to_numpy()
yy_orig = data['elev'].to_numpy()
import numpy as np
for i in range(0, win//2): # generate smoothed data for the missing first minute
y_orig[i] = np.median(yy_orig[0:i+1]) # medians for shorter interval
y_orig[-i] = np.median(yy_orig[-i-2:-1])
x = np.arange(0, x_orig[-1]) # generate array for each seconds
y = np.interp(x, x_orig, y_orig) # linear interpolation from (x_orig, y_orig) data for each seconds
fig = plt.figure()
ax = fig.add_axes([0, 0, 3, 1])
ax.plot(x_orig[0:300], data['elev'].to_numpy()[0:300], label='original')
ax.plot(x[0:300], y[0:300], label='smoothed')
ax.legend()
ax.set_title('Original and smoothed data, first 5 minutes')
from sklearn.linear_model import LinearRegression
model = LinearRegression()
xx = x_orig.reshape(-1, 1) # use original points
model.fit(xx, y_orig) # calculate linear regression
intercept = model.intercept_ # intercept on y axis
coef = model.coef_ # slope
from math import atan, pi
print('{:.3f} {:.6f}'.format(intercept, atan(coef[0]) * 360 / pi))
y_no_trend = y - intercept - x * coef[0] # remove trend from serie
from scipy.fft import rfft, rfftfreq, irfft
sample_rate = 1 # 1 Hz input data
nfreq = 10 # number of significant frequencies to save
n = x.size # number of data point
yf = rfft(y_no_trend) # calculate FFT
xf = rfftfreq(n, 1 / sample_rate) # calculate vector of frquences
ayf = np.abs(yf)
f = np.sort(ayf)[::-1][:nfreq] # get the most significant frequencies
iyf = np.zeros(yf.shape[0]) # for inverse FFT
freqs = []
print("Significant frequences")
print(" FFT index frequency wave l ampl")
print(" [Hz] [hour] [m]")
for v in f:
i = np.where(ayf == v)[0][0] # get index of frequency
iyf[i] = ayf[i]
print("{:10.1f} {:8d} {:12.8f} {:8.3f} {:8.4f}".format(v, i, xf[i], 1/ xf[i] / 3600, v / n))
ifft = irfft(iyf) # inverse FFT for significant frequencies
fig = plt.figure()
ax = fig.add_axes([0, 0, 3, 1])
ax.plot(xf[:200], ayf[:200] / 120)
ax.set_title('Frequency domain (scaled down to (0-1))')
start = 1000
end = 80000
fig = plt.figure()
ax = fig.add_axes([0, 0, 4., 0.5]) # dimensions of the chart window
ax.plot(x_orig[start:end], data['elev'][start:end], label="original")
ax.plot(x[start:end], y[start:end], label='smoothed')
yyy = ifft + intercept + x[:-1] * coef[0] # linear trend added
ax.plot(x[start:end], yyy[start:end], label='FFT')
ax.set_title('original data and fourier serie of the 10 most significant frequency')
ax.legend()
from sklearn.metrics import mean_squared_error
from math import sqrt
MSE = mean_squared_error(y[:-1], yyy)
RMSE = sqrt(MSE)
print('RMSE: {:.4f}'.format(RMSE))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Iris dataset
Step2: PCA
Step3: The explained variance of P.C. #0 is one order of magnitude higher than that of P.C. #1 and #2, and two orders of magnitude higher than that of P.C. #3. We can use this knowledge to reduce our dataset from 4D to 3D.
|
<ASSISTANT_TASK:>
Python Code:
from sklearn import datasets
from sklearn.decomposition import PCA
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
# %matplotlib inline
%matplotlib notebook
iris = datasets.load_iris()
X = pd.DataFrame(iris.data, columns=iris.feature_names)
y = pd.Series(iris.target, name='FlowerType')
X.head()
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X['sepal length (cm)'], X['sepal width (cm)'], s=35, c=y, cmap=plt.cm.brg)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title('Sepal length vs. Sepal width')
plt.show()
pca_iris = PCA(n_components=3).fit(iris.data)
pca_iris.explained_variance_ratio_
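# Cumulative share of variance captured by the first k components
# (numpy is not imported above, so bring it in for this quick check):
import numpy as np
np.cumsum(pca_iris.explained_variance_ratio_)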
pca_iris.transform(iris.data)
iris_reduced = PCA(n_components=3).fit(iris.data)
iris_reduced.components_
iris_reduced = PCA(n_components=3).fit_transform(iris.data)
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(iris_reduced[:, 0], iris_reduced[:, 1], iris_reduced[:, 2],
cmap=plt.cm.Paired, c=iris.target)
for k in range(3):
ax.scatter(iris_reduced[y==k, 0], iris_reduced[y==k, 1], iris_reduced[y==k, 2], label=iris.target_names[k])
ax.set_title("First three P.C.")
ax.set_xlabel("P.C. 1")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("P.C. 2")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("P.C. 3")
ax.w_zaxis.set_ticklabels([])
plt.legend(numpoints=1)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup - virtualenv jupyter kernel
Step2: Setup - Initialize Django
Step3: Setup R
|
<ASSISTANT_TASK:>
Python Code:
import datetime
import six
print( "packages imported at " + str( datetime.datetime.now() ) )
%pwd
%run ../django_init.py
# start to support python 3:
from __future__ import unicode_literals
from __future__ import division
#==============================================================================#
# ! imports
#==============================================================================#
# grouped by functional area, then alphabetical order by package, then
# alphabetical order by name of thing being imported.
# context_analysis imports
from context_analysis.reliability.reliability_names_analyzer import ReliabilityNamesAnalyzer
#==============================================================================#
# ! logic
#==============================================================================#
# declare variables
my_analysis_instance = None
label = ""
indices_to_process = -1
result_status = ""
# make reliability instance
my_analysis_instance = ReliabilityNamesAnalyzer()
# database connection information - 2 options... Enter it here:
#my_analysis_instance.db_username = ""
#my_analysis_instance.db_password = ""
#my_analysis_instance.db_host = "localhost"
#my_analysis_instance.db_name = "sourcenet"
# Or set up the following properties in Django_Config, inside the Django admin.
# All have application of: "sourcenet-db-admin":
# - db_username
# - db_password
# - db_host
# - db_port
# - db_name
# run the analyze method
label = "prelim_month_human"
indices_to_process = 2
result_status = my_analysis_instance.analyze_reliability_names( label, indices_to_process )
print( "result status: {status_string}".format( status_string = result_status ) )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: t-distribution
Step2: Multivariate t-distribution
Step3:
Step4:
Step5: https
|
<ASSISTANT_TASK:>
Python Code:
from numpy.linalg import inv
import numpy as np
from math import pi, sqrt, gamma
from scipy.stats import t
import matplotlib.pyplot as plt
%matplotlib inline
def my_t(x, df):
_ = (df + 1.)/2.
return gamma(_) / (sqrt(pi* df) * gamma(df/2.) * (1. + x**2/df) ** (_))
def my_t(x, df):
_ = lambda x : (df + x)/2.
return gamma(_(1)) / (sqrt(pi* df) * gamma(_(0)) * (1. + x**2/df) ** (_(1)))
my_t(0, 2.74)
rv = t(2.74)
rv.pdf(0)
def squared_distance(x, mu, sigma):
diff = (x - mu)
return diff.dot(inv(sigma)).dot(diff.T)
def multivariate(x, mu, sigma, df):
p = x.shape[1]
f = lambda _ : (df+_)/2.
det = np.linalg.det(sigma) ** (-1./2.)
param0 = gamma(f(p))
param1 = (np.pi * df) ** (-p/2.)
param2 = gamma(f(0)) ** -1.
delta = x - mu
param3 = 1. + (delta.dot(inv(sigma)).dot(delta.T))/df
param3 = param3 ** (-f(p))
#return param3
return param0 * det * param1 * param2 * param3
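# Sanity check against SciPy (assumption: needs scipy >= 1.6, which added
# scipy.stats.multivariate_t; left commented out for older installations):
# from scipy.stats import multivariate_t
# multivariate_t(loc=[3, 3], shape=[[1, 0], [0, 1]], df=1).pdf([1, 1])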
np.linalg.det([[1,0],[0,1]]) ** (-1./2.)
x = np.array([1,1])
mu = np.array([3,3])
dec = np.linalg.cholesky([[1,0],[0,1]])
(np.linalg.solve(dec, x - mu) ** 2).sum(axis=0)
multivariate(np.array([[1,1]]), [3,3], [[1,0],[0,1]], 1)
x1, y1 = np.mgrid[-2.5:2.5:.01, -2.5:2.5:.01]
XY = []
for xy in zip(x1, y1):
sample = np.array(xy).T
xy_ = []
for _ in sample:
l = multivariate(_.reshape(1,-1), [.0,.0],[[1.,0.],[0,1.]],100)
xy_.extend(l[0])
XY.append(xy_)
XY = np.array(XY)
print XY.shape
plt.contour(x1, y1, XY)
plt.hlines(1, -2.5, 2.5)
plt.vlines(1, -2.5, 2.5)
plt.show()
x1, y1 = np.mgrid[-2.5:2.5:.01, -2.5:2.5:.01]
XY = []
for xy in zip(x1, y1):
sample = np.array(xy).T
xy_ = []
for _ in sample:
l = multivariate(_.reshape(1,-1), [.0,.0],[[.1,.0],[.0,.2]],100)
xy_.extend(l[0])
XY.append(xy_)
XY = np.array(XY)
print XY.shape
plt.contour(x1, y1, XY)
plt.show()
#written by Enzo Michelangeli, style changes by josef-pktd
# Student's T random variable
def multivariate_t_rvs(m, S, df=np.inf, n=1):
'''generate random variables of multivariate t distribution
Parameters
----------
m : array_like
mean of random variable, length determines dimension of random variable
S : array_like
square array of covariance matrix
df : int or float
degrees of freedom
n : int
number of observations, return random array will be (n, len(m))
Returns
-------
rvs : ndarray, (n, len(m))
each row is an independent draw of a multivariate t distributed
random variable
'''
m = np.asarray(m)
d = len(m)
if df == np.inf:
x = 1.
else:
x = np.random.chisquare(df, n)/df
z = np.random.multivariate_normal(np.zeros(d),S,(n,))
return m + z/np.sqrt(x)[:,None] # same output format as random.multivariate_normal
x1 = multivariate_t_rvs([0,0], [[1,0],[0,1]],9, 300)
x2 = multivariate_t_rvs([1.5,1.5], [[.5,1.],[.1,.7]],9, 300)
plt.scatter(x1[:,0], x1[:,1], alpha=.5)
plt.scatter(x2[:,0], x2[:,1], alpha=.5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In GetData_Python, Function_name could be
Step2: In GetPosition_Python, Function_name could be
Step3: In GetFilter_Python, Function_name could be
|
<ASSISTANT_TASK:>
Python Code:
import zeep
import numpy as np
client = zeep.Client('http://turbulence.pha.jhu.edu/service/turbulence.asmx?WSDL')
ArrayOfFloat = client.get_type('ns0:ArrayOfFloat')
ArrayOfArrayOfFloat = client.get_type('ns0:ArrayOfArrayOfFloat')
SpatialInterpolation = client.get_type('ns0:SpatialInterpolation')
TemporalInterpolation = client.get_type('ns0:TemporalInterpolation')
token="edu.jhu.pha.turbulence.testing-201406" #replace with your own token
nnp=5 #number of points
points=np.random.rand(nnp,3)
# convert to JHTDB structures
x_coor=ArrayOfFloat(points[:,0].tolist())
y_coor=ArrayOfFloat(points[:,1].tolist())
z_coor=ArrayOfFloat(points[:,2].tolist())
point=ArrayOfArrayOfFloat([x_coor,y_coor,z_coor]);
print(points)
Function_name="GetVelocityGradient"
time=0.6
number_of_component=9 # change this based on function_name, see http://turbulence.pha.jhu.edu/webquery/query.aspx
result=client.service.GetData_Python(Function_name, token,"isotropic1024coarse", 0.6,
SpatialInterpolation("None_Fd4"), TemporalInterpolation("None"), point)
result=np.array(result).reshape((-1, number_of_component))
print(result)
Function_name="GetPosition"
startTime=0.1
endTime=0.2
dt=0.02
number_of_component=3 # change this based on function_name, see http://turbulence.pha.jhu.edu/webquery/query.aspx
result=client.service.GetPosition_Python(Function_name, token,"isotropic1024coarse", startTime, endTime, dt,
SpatialInterpolation("None"), point)
result=np.array(result).reshape((-1, number_of_component))
print(result)
Function_name="GetBoxFilter" #could also be
field="u"
time=0.6
filterwidth=0.05
spacing=0 #spacing is only used in GetBoxFilterGradient, but always provide it.
number_of_component=3 # change this based on function_name, see http://turbulence.pha.jhu.edu/webquery/query.aspx
result=client.service.GetFilter_Python("GetBoxFilter",token,"isotropic1024coarse", field,
time, filterwidth, SpatialInterpolation("None"), point, spacing)
result=np.array(result).reshape((-1, number_of_component))
print(result)
import struct
import base64
field="u"
timestep=1
x_start=1
y_start=1
z_start=1
x_end=2
y_end=5
z_end=8
x_step=1
y_step=1
z_step=1
filter_width=0
result=client.service.GetAnyCutoutWeb(token,"isotropic1024coarse", field, timestep,
x_start, y_start, z_start, x_end, y_end, z_end,
x_step, y_step, z_step, filter_width, "") # put empty string for the last parameter
# unpack the (base64-decoded) binary cutout into a numpy array
number_of_component=3 # change this based on the field
nx=len(range(x_start, x_end+1, x_step))
ny=len(range(y_start, y_end+1, y_step))
nz=len(range(z_start, z_end+1, z_step))
base64_len=int(nx*ny*nz*number_of_component)
base64_format='<'+str(base64_len)+'f'
result=struct.unpack(base64_format, result)
result=np.array(result).reshape((nz, ny, nx, number_of_component))
print(result.shape) # see the shape of the result and compare it with nx, ny, nz and number of component
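# With the reshape above, single velocity components can be sliced out,
# e.g. the x-component over the whole cutout (shape nz x ny x nx):
u_component = result[..., 0]
print(u_component.shape)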
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This was run on 1/26/2016, 434 days after November 18, 2014. But the Data Portal only has data up to 1/18/2016, so we want to go to 426 days before the pivot, which works out to 9/18/2013.
Step2: Before we do anything, we should add a column for just date so we can do statistics at the level of individual dates, which is kinda what we're after.
Step3: Our first question
Step4: Now let's graph it.
Step5: Last question (for now)
|
<ASSISTANT_TASK:>
Python Code:
from datetime import datetime, timedelta
pivot = datetime.strptime('11/18/2014', '%m/%d/%Y')
today = datetime.strptime('1/18/2016', '%m/%d/%Y')
print today - pivot
period = timedelta(days=426)
print pivot - period
import pandas as pd
url = 'https://data.cityofchicago.org/api/views/qa42-2iy9/rows.csv?accessType=DOWNLOAD'
frame = pd.read_csv(url, parse_dates=['Date'])
print frame.head(2)
print '%d crimes found' % len(frame)
frame['Date Only'] = pd.to_datetime(frame['Date'].apply(lambda x: x.date()))
pivot = pivot.date()
print '%d crimes on or after %s' % (frame[frame['Date Only'] >= pivot].Date.count(), pivot)
print '%d crimes before %s' % (frame[frame['Date Only'] < pivot].Date.count(), pivot)
# Let's get nicer-looking plots. Can't use ggplot because my version of matplotlib is too old (I think).
pd.set_option('display.mpl_style', 'default')
pd.set_option('display.width', 10000)
pd.set_option('display.max_columns', 60)
# We need to specifically ask matplotlib to display plots inline
%matplotlib inline
import matplotlib.pyplot as plt
frame.groupby('Date Only').count().plot(legend=None)
for ucr in frame['IUCR'].unique():
print ucr
ucr_frame = frame[frame['IUCR'] == ucr]
print '%d crimes on or after %s' % (ucr_frame[ucr_frame['Date Only'] >= pivot].Date.count(), pivot)
print '%d crimes before %s' % (ucr_frame[ucr_frame['Date Only'] < pivot].Date.count(), pivot)
print '---'
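# The same per-IUCR comparison, vectorized in a single groupby (a sketch;
# the helper column 'After Pivot' is added here and is not original):
frame['After Pivot'] = frame['Date Only'] >= pivot
print frame.groupby(['IUCR', 'After Pivot']).size().unstack().fillna(0)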
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Network Architecture
Step2: Training
Step3: Denoising
Step4: Checking out the performance
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
learning_rate = 0.01
inputs_ = tf.placeholder(tf.float32, [None, 28, 28, 1], name='inputs')
targets_ = tf.placeholder(tf.float32, [None, 28, 28, 1], name='targets')
### Encoder
conv1 = tf.layers.conv2d(inputs_, filters=16, kernel_size=(3,3), strides=(1, 1), padding='same', activation=tf.nn.relu)
# Now 28x28x16
maxpool1 = tf.layers.max_pooling2d(conv1, 2, 2, 'same')
# Now 14x14x16
conv2 = tf.layers.conv2d(maxpool1, filters=8, kernel_size=(3,3), strides=(1, 1), padding='same', activation=tf.nn.relu)
# Now 14x14x8
maxpool2 = tf.layers.max_pooling2d(conv2, 2, 2, 'same')
# Now 7x7x8
conv3 = tf.layers.conv2d(maxpool2, filters=8, kernel_size=(3,3), strides=(1, 1), padding='same', activation=tf.nn.relu)
# Now 7x7x8
encoded = tf.layers.max_pooling2d(conv3, 2, 2, 'same')
# Now 4x4x8
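# (with padding='same' and stride 2 the spatial size rounds up:
#  ceil(7/2) = 4, which is why the 7x7 maps pool down to 4x4 here)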
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7, 7))
# Now 7x7x8
conv4 = tf.layers.conv2d(upsample1, filters=8, kernel_size=(3,3), strides=(1, 1), padding='same', activation=tf.nn.relu)
# Now 7x7x8
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14, 14))
# Now 14x14x8
conv5 = tf.layers.conv2d(upsample2, filters=8, kernel_size=(3,3), strides=(1, 1), padding='same', activation=tf.nn.relu)
# Now 14x14x8
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28, 28))
# Now 28x28x8
conv6 = tf.layers.conv2d(upsample3, filters=16, kernel_size=(3,3), strides=(1, 1), padding='same', activation=tf.nn.relu)
# Now 28x28x16
logits = tf.layers.conv2d(conv6, filters=1, kernel_size=(3,3), strides=(1, 1), padding='same', activation=None)
#Now 28x28x1
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits)
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
epochs = 20
batch_size = 500
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
imgs = batch[0].reshape((-1, 28, 28, 1))
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs,
targets_: imgs})
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}\r".format(batch_cost), end='')
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
learning_rate = 0.01
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
conv1 = tf.layers.conv2d(inputs_, filters=32, kernel_size=(3,3), strides=(1, 1), padding='same', activation=tf.nn.relu)
# Now 28x28x32
maxpool1 = tf.layers.max_pooling2d(conv1, 2, 2, 'same')
# Now 14x14x32
conv2 = tf.layers.conv2d(maxpool1, filters=32, kernel_size=(3,3), strides=(1, 1), padding='same', activation=tf.nn.relu)
# Now 14x14x32
maxpool2 = tf.layers.max_pooling2d(conv2, 2, 2, 'same')
# Now 7x7x32
conv3 = tf.layers.conv2d(maxpool2, filters=16, kernel_size=(3,3), strides=(1, 1), padding='same', activation=tf.nn.relu)
# Now 7x7x16
encoded = tf.layers.max_pooling2d(conv3, 2, 2, 'same')
# Now 4x4x16
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7, 7))
# Now 7x7x16
conv4 = tf.layers.conv2d(upsample1, filters=16, kernel_size=(3,3), strides=(1, 1), padding='same', activation=tf.nn.relu)
# Now 7x7x16
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14, 14))
# Now 14x14x16
conv5 = tf.layers.conv2d(upsample2, filters=32, kernel_size=(3,3), strides=(1, 1), padding='same', activation=tf.nn.relu)
# Now 14x14x32
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28, 28))
# Now 28x28x32
conv6 = tf.layers.conv2d(upsample3, filters=32, kernel_size=(3,3), strides=(1, 1), padding='same', activation=tf.nn.relu)
# Now 28x28x32
logits = tf.layers.conv2d(conv6, filters=1, kernel_size=(3,3), strides=(1, 1), padding='same', activation=None)
#Now 28x28x1
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits)
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
epochs = 100
batch_size = 500
# Sets how much noise we're adding to the MNIST images
noise_factor = 0.5
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images from the batch
imgs = batch[0].reshape((-1, 28, 28, 1))
# Add random noise to the input images
noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)
# Clip the images to be between 0 and 1
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
# Noisy images as inputs, original images as targets
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,
targets_: imgs})
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}\r".format(batch_cost), end='')
print()
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
reconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([noisy_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Trapezoidal rule
Step4: Now use scipy.integrate.quad to integrate the f and g functions and see how the result compares with your trapz function. Print the results and errors.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy import integrate
def trapz(f, a, b, N):
    Integrate the function f(x) over the range [a,b] with N points.
    N = N+1
    h = (b-a)/N
    k = np.arange(1,N)
    return h*(0.5*f(a) + 0.5*f(b) + f(a+k*h).sum())
f = lambda x: x**2
g = lambda x: np.sin(x)
I = trapz(f, 0, 1, 1000)
assert np.allclose(I, 0.33333349999999995)
J = trapz(g, 0, np.pi, 1000)
assert np.allclose(J, 1.9999983550656628)
RYANISAWESOME = integrate.quad(f,0,1)[0]
RYANISSTILLAWESOME = integrate.quad(g,0,np.pi)[0]
error1 = np.abs(I - RYANISAWESOME) / RYANISAWESOME
error2 = np.abs(J - RYANISSTILLAWESOME) / RYANISSTILLAWESOME
print("{0} vs scipy.integrate.quad error = {1}".format("I",error1))
print("{0} vs scipy.integrate.quad error = {1}".format("J",error2))
assert True # leave this cell to grade the previous one
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's plot that data, just to make it clearer
Step2: Now we need to make our network and train it.
Step3: Now we create our network. I don't quite understand exactly what's happening here, but I copied it from an LSTM tutorial.
Step4: Now we train it.
|
<ASSISTANT_TASK:>
Python Code:
# imports assumed by this notebook (TensorFlow 1.x contrib API)
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
import matplotlib.pyplot as plt

t = np.arange(50)*0.05
input_data = np.sign(np.array([np.sin(2*np.pi*t),np.sin(2*np.pi*t)]).T).astype(float)
input_data += np.random.normal(size=input_data.shape)*0.1
output_data = (np.sign(np.sin(2*np.pi*t*2+np.pi)).astype(float)+1)/2
print('Input Data', input_data)
print('Output Data', output_data)
plt.subplot(2,1,1)
plt.plot(input_data)
plt.title('input data')
plt.subplot(2,1,2)
plt.plot(output_data)
plt.title('output data')
plt.tight_layout()
plt.show()
n_epochs = 4000 # number of times to run the training
n_units = 200 # size of the neural network
n_classes = 1 # number of values in the output
n_features = 2 # number of values in the input
X = tf.placeholder('float',[None,n_features])
Y = tf.placeholder('float')
weights = tf.Variable(tf.random_normal([n_units, n_classes]))
bias = tf.Variable(tf.random_normal([n_classes]))
x = tf.split(X, n_features, 1)
lstm_cell = rnn.BasicLSTMCell(n_units)
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
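# static_rnn unrolls the LSTM: `x` is a list with one tensor per split of
# the input, `outputs` is the matching list of per-step outputs, and only
# the last output feeds the dense read-out layer below.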
output = tf.matmul(outputs[-1], weights) + bias
output = tf.reshape(output, [-1])
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=Y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
with tf.Session() as session:
# initialize the network
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
# now do the training
for epoch in range(n_epochs):
# this does one pass through the traiing
_, error = session.run([optimizer, cost], feed_dict={X: input_data, Y: output_data})
# print a message every 100 epochs
if epoch % 100 == 0:
print('Epoch', epoch, 'completed out of', n_epochs, 'error:', error)
# now compute the output after training
pred = tf.round(tf.nn.sigmoid(output)).eval({X: input_data})
plt.subplot(2, 1, 1)
plt.title('ideal output')
plt.plot(output_data)
plt.subplot(2, 1, 2)
plt.title('predicted output')
plt.plot(pred)
plt.tight_layout()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Scikit-learn Estimator Object
Step2: Estimator parameters
Step3: Estimated Model parameters
Step4: The model found a line with a slope 2 and intercept 1, as we'd expect.
Step5: You can also do probabilistic predictions
Step6: Exercise
Step7: As above, we can plot a line of best fit
Step8: Scikit-learn also has some more sophisticated models, which can respond to finer features in the data
Step9: Whether either of these is a "good" fit or not depends on a number of things; we'll discuss details of how to choose a model later in the tutorial.
Step10: Clustering
Step11: Recap
Step12: A more useful way to look at the results is to view the confusion matrix, or the matrix showing the frequency of inputs and outputs
Step13: For each class, all 50 training samples are correctly identified. But this does not mean that our model is perfect! In particular, such a model generalizes extremely poorly to new data. We can simulate this by splitting our data into a training set and a testing set. Scikit-learn contains some convenient routines to do this
Step14: This paints a better picture of the true performance of our classifier
Step15: Original source on the scikit-learn website
Step16: Let's plot a few of these
Step17: Here the data is simply each pixel value within an 8x8 grid
Step18: So our data have 1797 samples in 64 dimensions.
Step19: We see here that the digits are fairly well-separated in the parameter space; this tells us that a supervised classification algorithm should perform fairly well. Let's give it a try.
Step20: Let's use a simple logistic regression which (despite its confusing name) is a classification algorithm
Step21: We can check our classification accuracy by comparing the true values of the test set to the predictions
Step22: This single number doesn't tell us where we've gone wrong
Step23: We might also take a look at some of the outputs along with their predicted labels. We'll make the bad labels red
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
model = LinearRegression(normalize=True)
print(model.normalize)
print(model)
model2 = LinearRegression()
model?
x = np.arange(10)
y = 2 * x + 1
print(x)
print(y)
plt.plot(x, y, 'o');
# The input data for sklearn is 2D: (samples == 10 x features == 1)
X = x[:, np.newaxis]
print(X)
print(y)
# fit the model on our data
model.fit(X, y)
# underscore at the end indicates a fit parameter
print(model.coef_)
print(model.intercept_)
# residual error around fit
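# note: residues_ is deprecated in newer scikit-learn; an equivalent is
# np.sum((model.predict(X) - y) ** 2)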
model.residues_
model.score(X, y)
from sklearn import neighbors, datasets
iris = datasets.load_iris()
X, y = iris.data, iris.target
# create the model
knn = neighbors.KNeighborsClassifier(n_neighbors=5)
# fit the model
knn.fit(X, y)
# What kind of iris has 3cm x 5cm sepal and 4cm x 2cm petal?
# call the "predict" method:
result = knn.predict([[5, 3, 4, 2],])
print(iris.target_names[result])
iris.target_names
knn.predict_proba([[5, 3, 4, 2],])
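# each row of the returned array sums to 1; columns follow the order of iris.target_names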
from fig_code import plot_iris_knn
plot_iris_knn()
# Create some simple data
import numpy as np
np.random.seed(0)
X = np.random.random(size=(20, 1))
y = 3 * X.squeeze() + 2 + np.random.randn(20)
plt.plot(X.squeeze(), y, 'o');
model = LinearRegression()
model.fit(X, y)
# Plot the data and the model prediction
X_fit = np.linspace(0, 1, 100)[:, np.newaxis]
y_fit = model.predict(X_fit)
plt.plot(X.squeeze(), y, 'o')
plt.plot(X_fit.squeeze(), y_fit);
# Fit a Random Forest
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor(n_estimators=10, max_depth=5)
model.fit(X, y)
# Plot the data and the model prediction
X_fit = np.linspace(0, 1, 100)[:, np.newaxis]
y_fit = model.predict(X_fit)
plt.plot(X.squeeze(), y, 'o')
plt.plot(X_fit.squeeze(), y_fit);
X, y = iris.data, iris.target
from sklearn.decomposition import PCA
pca = PCA(n_components=0.95)
pca.fit(X)
X_reduced = pca.transform(X)
print("Reduced dataset shape:", X_reduced.shape)
import pylab as plt
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], #c=y,
cmap='RdYlBu')
print("Meaning of the 2 components:")
for component in pca.components_:
print(" + ".join("%.3f x %s" % (value, name)
for value, name in zip(component,
iris.feature_names)))
from sklearn.cluster import KMeans
k_means = KMeans(n_clusters=3, random_state=0) # Fixing the RNG in kmeans
k_means.fit(X)
y_pred = k_means.predict(X)
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y_pred,
cmap='RdYlBu');
from sklearn.neighbors import KNeighborsClassifier
X, y = iris.data, iris.target
clf = KNeighborsClassifier(n_neighbors=1)
clf.fit(X, y)
y_pred = clf.predict(X)
print(np.all(y == y_pred))
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y, y_pred))
from sklearn.model_selection import train_test_split
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, random_state=23)
clf.fit(Xtrain, ytrain)
ypred = clf.predict(Xtest)
print(confusion_matrix(ytest, ypred))
Xtest.shape[0] / len(X)
from IPython.display import Image
Image("http://scikit-learn.org/dev/_static/ml_map.png")
from sklearn import datasets
digits = datasets.load_digits()
digits.images.shape
fig, axes = plt.subplots(10, 10, figsize=(8, 8))
fig.subplots_adjust(hspace=0.1, wspace=0.1)
for i, ax in enumerate(axes.flat):
ax.imshow(digits.images[i], cmap='binary', interpolation='nearest')
ax.text(0.05, 0.05, str(digits.target[i]),
transform=ax.transAxes, color='green')
ax.set_xticks([])
ax.set_yticks([])
# The images themselves
print(digits.images.shape)
print(digits.images[0])
# The data for use in our algorithms
print(digits.data.shape)
print(digits.data[0])
# The target label
print(digits.target)
from sklearn.manifold import Isomap
iso = Isomap(n_components=2)
data_projected = iso.fit_transform(digits.data)
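# Isomap embeds the 64-dimensional pixel space into 2 dimensions for plotting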
data_projected.shape
plt.scatter(data_projected[:, 0], data_projected[:, 1], c=digits.target,
edgecolor='none', alpha=0.5, cmap=plt.cm.get_cmap('nipy_spectral', 10));
plt.colorbar(label='digit label', ticks=range(10))
plt.clim(-0.5, 9.5)
from sklearn.model_selection import train_test_split
Xtrain, Xtest, ytrain, ytest = train_test_split(digits.data, digits.target,
random_state=2)
print(Xtrain.shape, Xtest.shape)
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(penalty='l2')
clf.fit(Xtrain, ytrain)
ypred = clf.predict(Xtest)
from sklearn.metrics import accuracy_score
accuracy_score(ytest, ypred)
from sklearn.metrics import confusion_matrix
print(confusion_matrix(ytest, ypred))
plt.imshow(np.log(confusion_matrix(ytest, ypred)),
cmap='Blues', interpolation='nearest')
plt.grid(False)
plt.ylabel('true')
plt.xlabel('predicted');
fig, axes = plt.subplots(10, 10, figsize=(8, 8))
fig.subplots_adjust(hspace=0.1, wspace=0.1)
for i, ax in enumerate(axes.flat):
ax.imshow(Xtest[i].reshape(8, 8), cmap='binary')
ax.text(0.05, 0.05, str(ypred[i]),
transform=ax.transAxes,
color='green' if (ytest[i] == ypred[i]) else 'red')
ax.set_xticks([])
ax.set_yticks([])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load up and prep the datasets
Step2: Construct the training data and targets
Step4: Validation Dataset
Step6: Multi-layer GRU Model with LReLU
Step7: Calculate the score on my predictions
Step8: Visualize the predictions vs true
Step9: Early Conclusions
Step10: Try CNN LSTM
Step11: Enough tinkering around
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, LeakyReLU, Dropout, ReLU, GRU, TimeDistributed, Conv1D, MaxPooling1D, Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.callbacks import EarlyStopping
from jlab import load_test_data, get_test_detector_plane
X_train = pd.read_csv('MLchallenge2_training.csv')
X_test = load_test_data('test_in.csv')
eval_planes = get_test_detector_plane(X_test)
# Also, load our truth values
y_true = pd.read_csv('test_prediction.csv', names=['x', 'y', 'px', 'py', 'pz'],
header=None)
X_test.head()
y_true.head()
N_SAMPLES = len(X_train)
N_DETECTORS = 25
N_KINEMATICS = 6
SHAPE = (N_SAMPLES, N_DETECTORS-1, N_KINEMATICS)
X_train_list = []
y_train_array = np.ndarray(shape=(N_SAMPLES, N_KINEMATICS-1))
for ix in range(N_SAMPLES):
seq_len = np.random.choice(range(8, 25))
track = X_train.iloc[ix].values.reshape(N_DETECTORS, N_KINEMATICS)
X_train_list.append(track[0:seq_len])
# Store the kinematics of the next in the sequence
# Ignore the 3rd one, which is z
y_train_array[ix] = track[seq_len][[0,1,3,4,5]]
for track in X_train_list[:10]:
print(len(track))
X_train_list = pad_sequences(X_train_list, dtype=float)
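# pad_sequences front-pads shorter tracks with zeros so every sample spans 24 timesteps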
for track in X_train_list[:10]:
print(len(track))
X_train_array = np.array(X_train_list)
X_train_array.shape
y_train_array.shape
N_TEST_SAMPLES = len(X_test)
y_test_array = y_true.values
X_test_list = []
for ix in range(N_TEST_SAMPLES):
seq_len = get_test_detector_plane(X_test.iloc[ix])
track = X_test.iloc[ix].values.reshape(N_DETECTORS, N_KINEMATICS)
X_test_list.append(track[0:seq_len])
X_test_list = pad_sequences(X_test_list, dtype=float)
X_test_array = np.array(X_test_list)
X_test_array.shape
y_test_array.shape
y_true.values.shape
import pandas as pd
import numpy as np
from math import floor
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
data = pd.read_csv('MLchallenge2_training.csv')
# Z values are constant -- what are they?
Z_VALS = data[['z'] + [f'z{i}' for i in range(1, 25)]].loc[0].values
# Z-distance from one timestep to another is set; calculate it
Z_DIST = [Z_VALS[i+1] - Z_VALS[i] for i in range(0, 24)] + [0.0]
# Number of timesteps
N_DETECTORS = 25
# Provided number of kinematics
N_KINEMATICS = 6
# Number of features after engineering them all
N_FEATURES = 13
def get_detector_meta(kin_array, det_id):
# Is there a large gap after this detector?
# 0 is for padded timesteps
# 1 is for No, 2 is for Yes
mind_the_gap = int(det_id % 6 == 0) + 1
# Detector group: 1 (origin), 2, 3, 4, or 5
det_grp = floor((det_id-1) / 6) + 2
# Detectors numbered 1-6 (origin is 6)
# (Which one in the group of six is it?)
det_rank = ((det_id-1) % 6) + 1
# Distance to the next detector?
z_dist = Z_DIST[det_id]
# Transverse momentum (x-y component)
pt = np.sqrt(np.square(kin_array[3]) + np.square(kin_array[4]))
# Total momentum
p_tot = np.sqrt(np.square(kin_array[3])
+ np.square(kin_array[4])
+ np.square(kin_array[5]))
# Put all the calculated features together
det_meta = np.array([det_id, mind_the_gap, det_grp, det_rank,
z_dist, pt, p_tot])
# Return detector data plus calculated features
return np.concatenate([kin_array, det_meta], axis=None)
def tracks_to_time_series(X):
Convert training dataframe to multivariate time series training set
Pivots each track to a series ot timesteps. Then randomly truncates them
to be identical to the provided test set. The step after the truncated
step is saved as the target.
Truncated sequence are front-padded with zeros.
Parameters
----------
X : pandas.DataFrame
Returns
-------
(numpy.ndarray, numpy.ndarray)
Tuple of the training data and labels
X_ts_list = []
n_samples = len(X)
y_array = np.ndarray(shape=(n_samples, N_KINEMATICS-1))
for ix in range(n_samples):
# Randomly choose how many detectors the track went through
track_len = np.random.choice(range(8, 25))
# Reshape into ts-like
track = X.iloc[ix].values.reshape(N_DETECTORS, N_KINEMATICS)
#eng_track = np.zeros(shape=(N_DETECTORS, N_FEATURES))
#for i in range(0, N_DETECTORS):
# eng_track[i] = get_detector_meta(track[i], i)
# Truncate the track to only N detectors
X_ts_list.append(track[0:track_len])
# Store the kinematics of the next in the sequence
# Ignore the 3rd one, which is z
y_array[ix] = track[track_len][[0,1,3,4,5]]
# Pad the training sequence
X_ts_list = pad_sequences(X_ts_list, dtype=float)
X_ts_array = np.array(X_ts_list)
return X_ts_array, y_array
X, y = tracks_to_time_series(data)
X[3]
y[3]
X_train, X_test, y_train, y_test = train_test_split(X, y)
len(X_train), len(X_test)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import GRU, Dense, LeakyReLU, Dropout
from tensorflow.keras.callbacks import EarlyStopping
import joblib
def lrelu(x):
return LeakyReLU()(x)
def gru_model(gru_units=35, dense_units=100,
dropout_rate=0.25):
Model definition.
Three layers of Gated Recurrent Units (GRUs), utilizing
LeakyReLU activations, finally passing GRU block output
to a dense layer, passing its output to the final output
layer, with a touch of dropout in between.
Bon apetit.
Parameters
----------
gru_units : int
dense_units : int
dropout_rate : float
Returns
-------
tensorflow.keras.models.Sequential
model = Sequential()
model.add(GRU(gru_units, activation=lrelu,
input_shape=(N_DETECTORS-1, N_KINEMATICS),
return_sequences=True))
model.add(GRU(gru_units, activation=lrelu,
return_sequences=True))
model.add(GRU(gru_units, activation=lrelu))
model.add(Dense(dense_units, activation=lrelu))
model.add(Dropout(dropout_rate))
model.add(Dense(N_KINEMATICS-1))
model.compile(loss='mse', optimizer='adam')
return model
model = gru_model()
model.summary()
from tensorflow.keras.utils import plot_model
plot_model(model, to_file='gru_model.png', show_shapes=True)
es = EarlyStopping(monitor='val_loss', mode='min',
patience=5, restore_best_weights=True)
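# restore_best_weights rolls the model back to the epoch with the lowest validation loss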
history = model.fit(
x=X_train,
y=y_train,
validation_data=(X_test, y_test),
callbacks=[es],
epochs=50,
)
model.save("gru_model.h5")
joblib.dump(history.history, "gru_model.history")
history = joblib.load("dannowitz_jlab2_model_20191031.history")
import matplotlib.pyplot as plt
# Plot training & validation loss values
plt.plot(history['loss'])
plt.plot(history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
pred = pd.read_csv('data/submission/dannowitz_jlab2_submission_20191112.csv', header=None)
truth = pd.read_csv('data/ANSWERS.csv', header=None)
# Calculate square root of the mean squared error
# Then apply weights and sum them all up
sq_error = (truth - pred).applymap(np.square)
mse = sq_error.sum() / len(truth)
rmse = np.sqrt(mse)
rms_weighted = rmse / [0.03, 0.03, 0.01, 0.01, 0.011]
score = rms_weighted.sum()
score
def lstm_model():
model = Sequential()
model.add(LSTM(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))
model.add(Dense(100, activation=LeakyReLU()))
model.add(Dropout(0.25))
model.add(Dense(N_KINEMATICS-1, activation='linear'))
model.compile(loss='mse', optimizer='adam')
return model
model = lstm_model()
model.summary()
history = model.fit(x=X_train_array, y=y_train_array, validation_data=(X_test_array, y_test_array), epochs=5)
history = model.fit(x=X_train_array, y=y_train_array,
validation_data=(X_test_array, y_test_array),
epochs=50, use_multiprocessing=True)
model = lstm_model()
es = EarlyStopping(monitor='val_loss', mode='min')
history = model.fit(x=X_train_array, y=y_train_array,
validation_data=(X_test_array, y_test_array),
callbacks=[es], epochs=20, use_multiprocessing=True)
model.save("lstm100-dense100-dropout025-epochs20-early-stopping.h5")
def lstm_model_lin():
model = Sequential()
model.add(LSTM(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))
model.add(Dense(100, activation=LeakyReLU()))
model.add(Dropout(0.25))
model.add(Dense(N_KINEMATICS-1, activation='linear'))
model.compile(loss='mse', optimizer='adam')
return model
lin_act_model = lstm_model_lin()
es = EarlyStopping(monitor='val_loss', mode='min')
history = lin_act_model.fit(x=X_train_array[:10000], y=y_train_array[:10000],
validation_data=(X_test_array, y_test_array),
callbacks=[es], epochs=20, use_multiprocessing=True)
def lstm_model_adam():
model = Sequential()
model.add(LSTM(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))
model.add(Dense(100, activation=LeakyReLU()))
model.add(Dropout(0.25))
model.add(Dense(N_KINEMATICS-1))
model.compile(loss='mse', optimizer='adam')
return model
adam_model = lstm_model_adam()
es = EarlyStopping(monitor='val_loss', mode='min')
history = adam_model.fit(x=X_train_array[:10000], y=y_train_array[:10000],
validation_data=(X_test_array, y_test_array),
callbacks=[es], epochs=20, use_multiprocessing=True)
def lstm_model_dropout50():
model = Sequential()
model.add(LSTM(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))
model.add(Dense(100, activation=LeakyReLU()))
model.add(Dropout(0.50))
model.add(Dense(N_KINEMATICS-1))
model.compile(loss='mse', optimizer='adam')
return model
dropout50_model = lstm_model_dropout50()
es = EarlyStopping(monitor='val_loss', mode='min')
history = dropout50_model.fit(x=X_train_array[:10000], y=y_train_array[:10000],
validation_data=(X_test_array, y_test_array),
callbacks=[es], epochs=20, use_multiprocessing=True)
def lstm_model_nodropout():
model = Sequential()
model.add(LSTM(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))
model.add(Dense(100, activation=LeakyReLU()))
model.add(Dense(N_KINEMATICS-1))
model.compile(loss='mse', optimizer='adam')
return model
nodropout_model = lstm_model_nodropout()
es = EarlyStopping(monitor='val_loss', mode='min')
history = nodropout_model.fit(x=X_train_array[:10000], y=y_train_array[:10000],
validation_data=(X_test_array, y_test_array),
callbacks=[es], epochs=20, use_multiprocessing=True)
def lstm_model_relu():
model = Sequential()
model.add(LSTM(200, activation='relu', input_shape=(N_DETECTORS-1, N_KINEMATICS)))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(N_KINEMATICS-1))
model.compile(loss='mse', optimizer='adam')
return model
relu_model = lstm_model_relu()
es = EarlyStopping(monitor='val_loss', mode='min')
history = relu_model.fit(x=X_train_array[:10000], y=y_train_array[:10000],
validation_data=(X_test_array, y_test_array),
callbacks=[es], epochs=20, use_multiprocessing=True)
def model_gru():
model = Sequential()
model.add(GRU(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))
model.add(Dense(100, activation=LeakyReLU()))
model.add(Dropout(0.25))
model.add(Dense(N_KINEMATICS-1))
model.compile(loss='mse', optimizer='adam')
return model
gru_model = model_gru()
es = EarlyStopping(monitor='val_loss', mode='min')
history = gru_model.fit(x=X_train_array[:10000], y=y_train_array[:10000],
validation_data=(X_test_array, y_test_array),
callbacks=[es], epochs=20, use_multiprocessing=True)
def model_v2():
model = Sequential()
model.add(GRU(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))
model.add(Dense(100, activation=LeakyReLU()))
model.add(Dropout(0.25))
model.add(Dense(N_KINEMATICS-1))
model.compile(loss='mse', optimizer='adam')
return model
v2_model = model_v2()
es = EarlyStopping(monitor='val_loss', mode='min')
history = v2_model.fit(x=X_train_array, y=y_train_array,
validation_data=(X_test_array, y_test_array),
callbacks=[es], epochs=8, use_multiprocessing=True)
def model_v2_deep():
model = Sequential()
model.add(GRU(30, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS),
return_sequences=True))
model.add(GRU(30, activation=LeakyReLU(), return_sequences=True))
model.add(GRU(30, activation=LeakyReLU()))
model.add(Dense(100, activation=LeakyReLU()))
model.add(Dropout(0.25))
model.add(Dense(N_KINEMATICS-1))
model.compile(loss='mse', optimizer='adam')
return model
v2_model_deep = model_v2_deep()
v2_model_deep.summary()
es = EarlyStopping(monitor='val_loss', mode='min', patience=2, restore_best_weights=True)
history = v2_model_deep.fit(x=X_train_array, y=y_train_array,
validation_data=(X_test_array, y_test_array),
callbacks=[es],
epochs=8, use_multiprocessing=True)
def model_v2_dbl_gru():
model = Sequential()
model.add(GRU(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS),
return_sequences=True))
model.add(GRU(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))
model.add(Dense(100, activation=LeakyReLU()))
model.add(Dropout(0.25))
model.add(Dense(N_KINEMATICS-1))
model.compile(loss='mse', optimizer='adam')
return model
v2_model_dbl_gru = model_v2_dbl_gru()
es = EarlyStopping(monitor='val_loss', mode='min')
history = v2_model_dbl_gru.fit(x=X_train_array[:20000], y=y_train_array[:20000],
validation_data=(X_test_array, y_test_array),
#callbacks=[es],
epochs=10, use_multiprocessing=True)
def model_v2_2x_dropout():
model = Sequential()
model.add(GRU(200, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))
model.add(Dropout(0.25))
model.add(Dense(100, activation=LeakyReLU()))
model.add(Dropout(0.25))
model.add(Dense(N_KINEMATICS-1))
model.compile(loss='mse', optimizer='adam')
return model
v2_model_dbl_dropout = model_v2_2x_dropout()
es = EarlyStopping(monitor='val_loss', mode='min')
history = v2_model_dbl_dropout.fit(x=X_train_array[:20000], y=y_train_array[:20000],
validation_data=(X_test_array, y_test_array),
callbacks=[es], epochs=20, use_multiprocessing=True)
def model_v2_big_gru():
model = Sequential()
model.add(GRU(400, activation=LeakyReLU(), input_shape=(N_DETECTORS-1, N_KINEMATICS)))
model.add(Dense(100, activation=LeakyReLU()))
model.add(Dropout(0.25))
model.add(Dense(N_KINEMATICS-1))
model.compile(loss='mse', optimizer='adam')
return model
v2_model_big_gru = model_v2_big_gru()
es = EarlyStopping(monitor='val_loss', mode='min')
history = v2_model_big_gru.fit(x=X_train_array[:20000], y=y_train_array[:20000],
validation_data=(X_test_array, y_test_array),
#callbacks=[es],
epochs=10, use_multiprocessing=True)
v2_model_big_gru.fit(x=X_train_array[:20000], y=y_train_array[:20000],
validation_data=(X_test_array, y_test_array),
#callbacks=[es],
epochs=15, use_multiprocessing=True, initial_epoch=10)
X_train_array.shape
def cnn_gru():
model = Sequential()
model.add(Conv1D(filters=5, kernel_size=2, strides=1, input_shape=(N_DETECTORS-1, N_KINEMATICS)))
#model.add(MaxPooling1D())
model.add(GRU(200, activation=LeakyReLU()))
model.add(Dense(100, activation=LeakyReLU()))
model.add(Dropout(0.25))
model.add(Dense(N_KINEMATICS-1))
model.compile(loss='mse', optimizer='adam')
return model
cnn_model = cnn_gru()
cnn_model.summary()
#es = EarlyStopping(monitor='val_loss', mode='min')
history = cnn_model.fit(x=X_train_array[:20000], y=y_train_array[:20000],
validation_data=(X_test_array, y_test_array),
epochs=10, use_multiprocessing=True)
history.history
from train import train
from predict import predict
model = train(frac=1.00, filename="dannowitz_jlab2_model", epochs=100, ret_model=True)
preds = predict(model_filename="dannowitz_jlab2_model.h5",
data_filename="test_in (1).csv",
output_filename="dannowitz_jlab2_submission.csv")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2 - Dataset
Step2: Visualize the dataset using matplotlib. The data looks like a "flower" with some red (label y=0) and some blue (y=1) points. Your goal is to build a model to fit this data.
Step3: You have
Step4: Expected Output
Step5: You can now plot the decision boundary of these models. Run the code below.
Step7: Expected Output
Step9: Expected Output (these are not the sizes you will use for your network, they are just used to assess the function you've just coded).
Step11: Expected Output
Step13: Expected Output
Step15: Expected Output
Step17: Expected output
Step19: Expected Output
Step21: Expected Output
Step22: Expected Output
Step23: Expected Output
Step24: Expected Output
Step25: Interpretation
|
<ASSISTANT_TASK:>
Python Code:
# Package imports
import numpy as np
import matplotlib.pyplot as plt
from testCases import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
%matplotlib inline
np.random.seed(1) # set a seed so that the results are consistent
X, Y = load_planar_dataset()
# Y = Y[0,:]
print(X.shape)
# Visualize the data:
plt.scatter(X[0, :], X[1, :], c=Y[0,], s=40, cmap=plt.cm.Spectral);
### START CODE HERE ### (≈ 3 lines of code)
shape_X = X.shape
shape_Y = Y.shape
m = shape_Y[1] # training set size
### END CODE HERE ###
print ('The shape of X is: ' + str(shape_X))
print ('The shape of Y is: ' + str(shape_Y))
print ('I have m = %d training examples!' % (m))
# Train the logistic regression classifier
clf = sklearn.linear_model.LogisticRegressionCV();
clf.fit(X.T, Y[0,:].T);
# Plot the decision boundary for logistic regression
plot_decision_boundary(lambda x: clf.predict(x), X, Y[0,:])
plt.title("Logistic Regression")
# Print accuracy
LR_predictions = clf.predict(X.T)
print ('Accuracy of logistic regression: %d ' % float((np.dot(Y,LR_predictions) + np.dot(1-Y,1-LR_predictions))/float(Y.size)*100) +
'% ' + "(percentage of correctly labelled datapoints)")
# GRADED FUNCTION: layer_sizes
def layer_sizes(X, Y):
Arguments:
X -- input dataset of shape (input size, number of examples)
Y -- labels of shape (output size, number of examples)
Returns:
n_x -- the size of the input layer
n_h -- the size of the hidden layer
n_y -- the size of the output layer
### START CODE HERE ### (≈ 3 lines of code)
n_x = X.shape[0] # size of input layer
n_h = 4
n_y = Y.shape[0] # size of output layer
### END CODE HERE ###
return (n_x, n_h, n_y)
X_assess, Y_assess = layer_sizes_test_case()
(n_x, n_h, n_y) = layer_sizes(X_assess, Y_assess)
print("The size of the input layer is: n_x = " + str(n_x))
print("The size of the hidden layer is: n_h = " + str(n_h))
print("The size of the output layer is: n_y = " + str(n_y))
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
params -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
np.random.seed(2) # we set up a seed so that your output matches ours although the initialization is random.
### START CODE HERE ### (≈ 4 lines of code)
# small random weights break symmetry between hidden units; zero biases are fine
W1 = np.random.randn(n_h, n_x) * 0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h) * 0.01
b2 = np.zeros((n_y, 1))
### END CODE HERE ###
assert (W1.shape == (n_h, n_x))
assert (b1.shape == (n_h, 1))
assert (W2.shape == (n_y, n_h))
assert (b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
n_x, n_h, n_y = initialize_parameters_test_case()
parameters = initialize_parameters(n_x, n_h, n_y)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
Argument:
X -- input data of size (n_x, m)
parameters -- python dictionary containing your parameters (output of initialization function)
Returns:
A2 -- The sigmoid output of the second activation
cache -- a dictionary containing "Z1", "A1", "Z2" and "A2"
# Retrieve each parameter from the dictionary "parameters"
### START CODE HERE ### (≈ 4 lines of code)
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
### END CODE HERE ###
# Implement Forward Propagation to calculate A2 (probabilities)
### START CODE HERE ### (≈ 4 lines of code)
Z1 = np.dot(W1, X) + b1
A1 = np.tanh(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = sigmoid(Z2)
### END CODE HERE ###
assert(A2.shape == (1, X.shape[1]))
cache = {"Z1": Z1,
"A1": A1,
"Z2": Z2,
"A2": A2}
return A2, cache
X_assess, parameters = forward_propagation_test_case()
A2, cache = forward_propagation(X_assess, parameters)
# Note: we use the mean here just to make sure that your output matches ours.
print(np.mean(cache['Z1']) ,np.mean(cache['A1']),np.mean(cache['Z2']),np.mean(cache['A2']))
# GRADED FUNCTION: compute_cost
def compute_cost(A2, Y, parameters):
Computes the cross-entropy cost given in equation (13)
Arguments:
A2 -- The sigmoid output of the second activation, of shape (1, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
parameters -- python dictionary containing your parameters W1, b1, W2 and b2
Returns:
cost -- cross-entropy cost given equation (13)
m = Y.shape[1] # number of example
# Retrieve W1 and W2 from parameters
### START CODE HERE ### (≈ 2 lines of code)
W1 = parameters['W1']
W2 = parameters['W2']
### END CODE HERE ###
# Compute the cross-entropy cost
### START CODE HERE ### (≈ 2 lines of code)
cost = - np.mean((np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), 1 - Y)))
### END CODE HERE ###
cost = np.squeeze(cost) # makes sure cost is the dimension we expect.
# E.g., turns [[17]] into 17
assert(isinstance(cost, float))
return cost
A2, Y_assess, parameters = compute_cost_test_case()
print("cost = " + str(compute_cost(A2, Y_assess, parameters)))
# GRADED FUNCTION: backward_propagation
def backward_propagation(parameters, cache, X, Y):
Implement the backward propagation using the instructions above.
Arguments:
parameters -- python dictionary containing our parameters
cache -- a dictionary containing "Z1", "A1", "Z2" and "A2".
X -- input data of shape (2, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
Returns:
grads -- python dictionary containing your gradients with respect to different parameters
m = X.shape[1]
# First, retrieve W1 and W2 from the dictionary "parameters".
### START CODE HERE ### (≈ 2 lines of code)
W1 = parameters['W1']
W2 = parameters['W2']
### END CODE HERE ###
# Retrieve also A1 and A2 from dictionary "cache".
### START CODE HERE ### (≈ 2 lines of code)
A1 = cache['A1']
A2 = cache['A2']
### END CODE HERE ###
# Backward propagation: calculate dW1, db1, dW2, db2.
### START CODE HERE ### (≈ 6 lines of code, corresponding to 6 equations on slide above)
dZ2 = A2 - Y
dW2 = np.dot(dZ2, A1.T) / m
db2 = np.mean(dZ2, axis = 1, keepdims = True)
dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1,2))
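# (1 - A1**2) is the derivative of tanh evaluated at Z1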
dW1 = np.dot(dZ1, X.T) / m
db1 = np.mean(dZ1, axis = 1, keepdims = True)
### END CODE HERE ###
grads = {"dW1": dW1,
"db1": db1,
"dW2": dW2,
"db2": db2}
return grads
parameters, cache, X_assess, Y_assess = backward_propagation_test_case()
grads = backward_propagation(parameters, cache, X_assess, Y_assess)
print ("dW1 = "+ str(grads["dW1"]))
print ("db1 = "+ str(grads["db1"]))
print ("dW2 = "+ str(grads["dW2"]))
print ("db2 = "+ str(grads["db2"]))
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate = 1.2):
Updates parameters using the gradient descent update rule given above
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients
Returns:
parameters -- python dictionary containing your updated parameters
# Retrieve each parameter from the dictionary "parameters"
### START CODE HERE ### (≈ 4 lines of code)
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
### END CODE HERE ###
# Retrieve each gradient from the dictionary "grads"
### START CODE HERE ### (≈ 4 lines of code)
dW1 = grads['dW1']
db1 = grads['db1']
dW2 = grads['dW2']
db2 = grads['db2']
### END CODE HERE ###
# Update rule for each parameter
### START CODE HERE ### (≈ 4 lines of code)
W1 -= learning_rate * dW1
b1 -= learning_rate * db1
W2 -= learning_rate * dW2
b2 -= learning_rate * db2
### END CODE HERE ###
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: nn_model
def nn_model(X, Y, n_h, num_iterations = 10000, print_cost=False):
Arguments:
X -- dataset of shape (2, number of examples)
Y -- labels of shape (1, number of examples)
n_h -- size of the hidden layer
num_iterations -- Number of iterations in gradient descent loop
print_cost -- if True, print the cost every 1000 iterations
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
np.random.seed(3)
n_x = layer_sizes(X, Y)[0]
n_y = layer_sizes(X, Y)[2]
# Initialize parameters, then retrieve W1, b1, W2, b2. Inputs: "n_x, n_h, n_y". Outputs = "W1, b1, W2, b2, parameters".
### START CODE HERE ### (≈ 5 lines of code)
parameters = initialize_parameters(n_x, n_h, n_y)
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
### START CODE HERE ### (≈ 4 lines of code)
# Forward propagation. Inputs: "X, parameters". Outputs: "A2, cache".
A2, cache = forward_propagation(X, parameters)
# Cost function. Inputs: "A2, Y, parameters". Outputs: "cost".
cost = compute_cost(A2, Y, parameters)
# Backpropagation. Inputs: "parameters, cache, X, Y". Outputs: "grads".
grads = backward_propagation(parameters, cache, X, Y)
# Gradient descent parameter update. Inputs: "parameters, grads". Outputs: "parameters".
parameters = update_parameters(parameters, grads)
### END CODE HERE ###
# Print the cost every 1000 iterations
if print_cost and i % 1000 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
return parameters
X_assess, Y_assess = nn_model_test_case()
parameters = nn_model(X_assess, Y_assess, 4, num_iterations=10000, print_cost=False)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: predict
def predict(parameters, X):
Using the learned parameters, predicts a class for each example in X
Arguments:
parameters -- python dictionary containing your parameters
X -- input data of size (n_x, m)
Returns
predictions -- vector of predictions of our model (red: 0 / blue: 1)
# Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold.
### START CODE HERE ### (≈ 2 lines of code)
A2, cache = forward_propagation(X, parameters)
predictions = A2 >= 0.5
### END CODE HERE ###
return predictions
parameters, X_assess = predict_test_case()
predictions = predict(parameters, X_assess)
print("predictions mean = " + str(np.mean(predictions)))
# Build a model with a n_h-dimensional hidden layer
parameters = nn_model(X, Y, n_h = 4, num_iterations = 10000, print_cost=True)
# Plot the decision boundary
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y[0,])
plt.title("Decision Boundary for hidden layer size " + str(4))
# Print accuracy
predictions = predict(parameters, X)
print ('Accuracy: %d' % float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100) + '%')
# This may take about 2 minutes to run
plt.figure(figsize=(16, 32))
hidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]
for i, n_h in enumerate(hidden_layer_sizes):
plt.subplot(5, 2, i+1)
plt.title('Hidden Layer of size %d' % n_h)
parameters = nn_model(X, Y, n_h, num_iterations = 5000)
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y[0,])
predictions = predict(parameters, X)
accuracy = float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100)
print ("Accuracy for {} hidden units: {} %".format(n_h, accuracy))
# Datasets
noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()
datasets = {"noisy_circles": noisy_circles,
"noisy_moons": noisy_moons,
"blobs": blobs,
"gaussian_quantiles": gaussian_quantiles}
### START CODE HERE ### (choose your dataset)
dataset = "noisy_moons"
### END CODE HERE ###
X, Y = datasets[dataset]
X, Y = X.T, Y.reshape(1, Y.shape[0])
# make blobs binary
if dataset == "blobs":
Y = Y%2
# Visualize the data
plt.scatter(X[0, :], X[1, :], c=Y[0,], s=40, cmap=plt.cm.Spectral);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Population by age
Step2: Exercise. What do you see here? What else would you like to know?
Step3: Exercise. What do you see here? What else would you like to know?
Step4: Exercise. What other countries would you like to see? Can you add them? The code below generates a list.
Step5: Exercise. Why do you think the US is falling behind? What would you look at to verify your conjecture?
Step6: Comment. At this point, we need to pivot the data. That's not something we've done before, so take it as simply something we can do easily if we have to. We're going to do this twice to produce different graphs
Step7: Exercises.
|
<ASSISTANT_TASK:>
Python Code:
# import packages
import pandas as pd # data management
import matplotlib.pyplot as plt # graphics
import matplotlib as mpl # graphics parameters
import numpy as np # numerical calculations
# IPython command, puts plots in notebook
%matplotlib inline
# check Python version
import datetime as dt
import sys
print('Today is', dt.date.today())
print('What version of Python are we running? \n', sys.version, sep='')
url1 = 'http://esa.un.org/unpd/wpp/DVD/Files/'
url2 = '1_Indicators%20(Standard)/EXCEL_FILES/1_Population/'
url3 = 'WPP2015_POP_F07_1_POPULATION_BY_AGE_BOTH_SEXES.XLS'
url = url1 + url2 + url3
cols = [2, 5] + list(range(6,28))
#est = pd.read_excel(url, sheetname=0, skiprows=16, parse_cols=cols, na_values=['…'])
prj = pd.read_excel(url, sheetname=1, skiprows=16, parse_cols=cols, na_values=['…'])
prj.head(3)[list(range(6))]
# rename some variables
pop = prj
names = list(pop)
pop = pop.rename(columns={names[0]: 'Country',
names[1]: 'Year'})
# select country and years
country = ['Japan']
years = [2015, 2055, 2095]
pop = pop[pop['Country'].isin(country) & pop['Year'].isin(years)]
pop = pop.drop(['Country'], axis=1)
# set index = Year
# divide by 1000 to convert numbers from thousands to millions
pop = pop.set_index('Year')/1000
pop.head()[list(range(8))]
# transpose (T) so that index = age
pop = pop.T
pop.head(3)
ax = pop.plot(kind='bar',
color='blue',
alpha=0.5, subplots=True, sharey=True, figsize=(8,6))
for axnum in range(len(ax)):
ax[axnum].set_title('')
ax[axnum].set_ylabel('Millions')
ax[0].set_title('Population by age', fontsize=14, loc='left')
# fertility overall
uft = 'http://esa.un.org/unpd/wpp/DVD/Files/'
uft += '1_Indicators%20(Standard)/EXCEL_FILES/'
uft += '2_Fertility/WPP2015_FERT_F04_TOTAL_FERTILITY.XLS'
cols = [2] + list(range(5,18))
ftot = pd.read_excel(uft, sheetname=0, skiprows=16, parse_cols=cols, na_values=['…'])
ftot.head(3)[list(range(6))]
# rename some variables
names = list(ftot)
f = ftot.rename(columns={names[0]: 'Country'})
# select countries
countries = ['China', 'Japan', 'Germany', 'United States of America']
f = f[f['Country'].isin(countries)]
# shape
f = f.set_index('Country').T
f = f.rename(columns={'United States of America': 'United States'})
f.tail(3)
fig, ax = plt.subplots()
f.plot(ax=ax, kind='line', alpha=0.5, lw=3, figsize=(6.5, 4))
ax.set_title('Fertility (births per woman, lifetime)', fontsize=14, loc='left')
ax.legend(loc='best', fontsize=10, handlelength=2, labelspacing=0.15)
ax.set_ylim(ymin=0)
ax.hlines(2.1, -1, 13, linestyles='dashed')
ax.text(8.5, 2.4, 'Replacement = 2.1')
# life expectancy at birth, both sexes
ule = 'http://esa.un.org/unpd/wpp/DVD/Files/1_Indicators%20(Standard)/EXCEL_FILES/3_Mortality/'
ule += 'WPP2015_MORT_F07_1_LIFE_EXPECTANCY_0_BOTH_SEXES.XLS'
cols = [2] + list(range(5,34))
le = pd.read_excel(ule, sheetname=0, skiprows=16, parse_cols=cols, na_values=['…'])
le.head(3)[list(range(10))]
# rename some variables
oldname = list(le)[0]
l = le.rename(columns={oldname: 'Country'})
l.head(3)[list(range(8))]
# select countries
countries = ['China', 'Japan', 'Germany', 'United States of America']
l = l[l['Country'].isin(countries)]
# shape
l = l.set_index('Country').T
l = l.rename(columns={'United States of America': 'United States'})
l.tail()
fig, ax = plt.subplots()
l.plot(ax=ax, kind='line', alpha=0.5, lw=3, figsize=(6, 8), grid=True)
ax.set_title('Life expectancy at birth', fontsize=14, loc='left')
ax.set_ylabel('Life expectancy in years')
ax.legend(loc='best', fontsize=10, handlelength=2, labelspacing=0.15)
ax.set_ylim(ymin=0)
countries = le.rename(columns={oldname: 'Country'})['Country']
# mortality overall
url = 'http://esa.un.org/unpd/wpp/DVD/Files/'
url += '1_Indicators%20(Standard)/EXCEL_FILES/3_Mortality/'
url += 'WPP2015_MORT_F17_1_ABRIDGED_LIFE_TABLE_BOTH_SEXES.XLS'
cols = [2, 5, 6, 7, 9]
mort = pd.read_excel(url, sheetname=0, skiprows=16, parse_cols=cols, na_values=['…'])
mort.tail(3)
# change names
names = list(mort)
m = mort.rename(columns={names[0]: 'Country', names[2]: 'Age', names[3]: 'Interval', names[4]: 'Mortality'})
m.head(3)
# compare countries for most recent period
countries = ['China', 'Japan', 'Germany', 'United States of America']
mt = m[m['Country'].isin(countries) & m['Interval'].isin([5]) & m['Period'].isin(['2010-2015'])]
print('Dimensions:', mt.shape)
mp = mt.pivot(index='Age', columns='Country', values='Mortality')
mp.head(3)
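# Toy example (assumed data) of the same pivot pattern: long format -> Age x Country grid
toy = pd.DataFrame({'Age': [0, 0, 5, 5], 'Country': ['A', 'B', 'A', 'B'],
                    'Mortality': [0.10, 0.20, 0.05, 0.08]})
print(toy.pivot(index='Age', columns='Country', values='Mortality'))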
fig, ax = plt.subplots()
mp.plot(ax=ax, kind='line', alpha=0.5, linewidth=3,
# logy=True,
figsize=(6, 4))
ax.set_title('Mortality by age', fontsize=14, loc='left')
ax.set_ylabel('Mortality Rate (log scale)')
ax.legend(loc='best', fontsize=10, handlelength=2, labelspacing=0.15)
# compare periods for the one country -- countries[0] is China
mt = m[m['Country'].isin([countries[0]]) & m['Interval'].isin([5])]
print('Dimensions:', mt.shape)
mp = mt.pivot(index='Age', columns='Period', values='Mortality')
mp = mp.iloc[:, [0, 6, 12]]  # select three periods by position; label-based mp[[0, 6, 12]] fails on modern pandas
mp.head(3)
fig, ax = plt.subplots()
mp.plot(ax=ax, kind='line', alpha=0.5, linewidth=3,
# logy=True,
figsize=(6, 4))
ax.set_title('Mortality over time', fontsize=14, loc='left')
ax.set_ylabel('Mortality Rate (log scale)')
ax.legend(loc='best', fontsize=10, handlelength=2, labelspacing=0.15)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: Model
Step3: Sampling using PyMC NUTS Sampler
Step4: Sampling using PyMC JAX Numpyro NUTS sampler
Step5: Sampling using BlackJax
Step6: Sampling
|
<ASSISTANT_TASK:>
Python Code:
import jax
import numpy as np
import pymc as pm
import pymc.sampling_jax
import blackjax
print(f"Running on PyMC v{pm.__version__}")
# Data of the Eight Schools Model
J = 8
y = np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0])
sigma = np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0])
with pm.Model() as model:
mu = pm.Normal("mu", mu=0.0, sigma=10.0)
tau = pm.HalfCauchy("tau", 5.0)
theta = pm.Normal("theta", mu=0, sigma=1, shape=J)
theta_1 = mu + tau * theta
obs = pm.Normal("obs", mu=theta_1, sigma=sigma, shape=J, observed=y)
%%time
with model:
posterior = pm.sample(50_000, chains=1)
%%time
with model:
hierarchical_trace_jax = pm.sampling_jax.sample_numpyro_nuts(
50_000, target_accept=0.9, chains=1, progress_bar=False
)
from pymc.sampling_jax import get_jaxified_logp
rvs = [rv.name for rv in model.value_vars]
init_position_dict = model.compute_initial_point()
init_position = [init_position_dict[rv] for rv in rvs]
logprob_fn = get_jaxified_logp(model)
%%time
seed = jax.random.PRNGKey(1234)
adapt = blackjax.window_adaptation(blackjax.nuts, logprob_fn, 1000)
last_state, kernel, _ = adapt.run(seed, init_position)
def inference_loop(rng_key, kernel, initial_state, num_samples):
def one_step(state, rng_key):
state, info = kernel(rng_key, state)
return state, (state, info)
keys = jax.random.split(rng_key, num_samples)
_, (states, infos) = jax.lax.scan(one_step, initial_state, keys)
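# lax.scan compiles the whole sampling loop; each step threads the previous state forward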
return states, infos
# Sample from the posterior distribution
states, infos = inference_loop(seed, kernel, last_state, 50_000)
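# Illustrative summary (assumes states.position follows the rvs order above, with mu first)
mu_samples = states.position[0]
print("posterior mean of mu:", float(mu_samples.mean()))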
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let us fire up sqlite3 and see what tables are inside the database.
Step2: Among the four (4) tables above, we'll be using 'history_items' and 'history_visits'. Let's take a look at their schemata.
Step3: The first thing to note is that each history visit table is associated with one history item, as we can tell from 'history_item' column in 'history_visits' table.
Step4: ...As well as our most recent visits.
Step5: Note that, from what I can tell at least, time is expressed as the number of seconds since the beginning of 2001-01-01.
Step6: That was it, our browsing history is in file 'visits.txt'.
Step7: Part 2
Step8: Let us parse the text file we created and extract the timestamps of our visits.
Step9: Let us plot the daily number of pages we've visited for that period of time.
Step10: What are websites we've visited most often?
Step11: Plot the number of visits to the websites we visit most frequently
|
<ASSISTANT_TASK:>
Python Code:
%%bash
cp ~/Library/Safari/History.db ~/Workspace/web_browsing/hs.db
%%script sqlite3 hs.db
.tables
%%script sqlite3 hs.db
.schema history_items
%%script sqlite3 hs.db
.schema history_visits
%%script sqlite3 hs.db
SELECT id, url, visit_count FROM history_items ORDER BY visit_count DESC LIMIT 5;
%%script sqlite3 hs.db
SELECT V.title, I.url, V.visit_time
FROM history_items as I,
(SELECT history_item, title, visit_time FROM history_visits ORDER BY visit_time DESC LIMIT 5) as V
WHERE I.id = V.history_item;
%%bash --out visits
sqlite3 hs.db \
"SELECT I.url, V.visit_time \
FROM history_items as I, \
(SELECT history_item, title, visit_time FROM history_visits) as V \
WHERE I.id = V.history_item;"
f = open('visits.txt', 'w')
f.write(visits)
f.close()
%%bash
head -5 visits.txt
echo "... ... ..."
tail -5 visits.txt
%matplotlib inline
import sys
import matplotlib.pyplot as plt
import time
import datetime
f = open('visits.txt')
tt = [] # stores timestamps of our visits
for line in f:
try:
tokens = line.rstrip().split('|')
time_sec = float(tokens[-1])
tt.append(time_sec)
except:
sys.stderr.write(line)
sys.stderr.write('\n')
f.close()
# sort the timestamps
tt.sort()
# turn into standard POSIX by adding
# the first second of 2001-01-01 (note: mktime interprets it in local time)
zero_sec = time.mktime(datetime.datetime(2001, 1, 1).timetuple())
for i in range(len(tt)): tt[i] += zero_sec
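# Quick check (illustrative): converted timestamps should now render as familiar dates
print datetime.datetime.fromtimestamp(tt[0]), '->', datetime.datetime.fromtimestamp(tt[-1])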
bin_size = 24 * 3600 # have one bin per 24 hours
num_of_bins = (tt[-1] - tt[0]) / bin_size
# figure out intervals to accomodate number of labels on x-axis
num_of_xlabels = 10
label_bin = (tt[-1] - tt[0]) / (num_of_xlabels - 1)
locs = [tt[0] + label_bin * (i) for i in range(num_of_xlabels)]
labels = map(lambda x: str(datetime.datetime.fromtimestamp(x)).split()[0], locs)
# plot the histogram
plt.figure(figsize=(15, 5))
plt.hist(tt, num_of_bins, range = (tt[0], tt[-1]))
plt.xticks(locs, labels)
plt.ylabel("Daily Visits")
import re
import numpy as np
website_pattern = re.compile('http(s)?://(\w+\.)?(\w+?)\.\w+?/')
verbose = False
f = open('visits.txt')
website_counts = {} # stores counts for each website
addresses = {} # stores the different addresses (base-urls) for each website
for line in f:
try:
# keep only the url, ignore the timestamp
tokens = line.rstrip().split('|')
m = website_pattern.search("".join(tokens[:-1]))
try:
# parse the url to extract the website name
# for example, if the url is http://www.facebook.com/ab238ub
# the website name is 'facebook'
website = m.group(3)
# update count
website_counts[website] = website_counts.get(website, 0) + 1
# keep track of the different base urls that match this website
# for example, if the url is http://www.facebook.com/ab238ub
# the base url is http://www.facebook.com/
if website not in addresses:
addresses[website] = {}
addresses[website][m.group(0)] = addresses[website].get(m.group(0), 0) + 1
except Exception, e:
if verbose:
sys.stderr.write(str(e))
sys.stderr.write('\n')
except:
sys.stderr.write(line)
sys.stderr.write('\n')
f.close()
k = 5 # The number of top websites to present
# Keep the top-k websites
k = min(k, len(website_counts))
top_websites = sorted(website_counts.items(),
key = lambda x: x[1], reverse = True)[:k]
# The number of visits to websites that are not among the top-k
other_visits = sum(x[1] for x in top_websites[k:])
# Let's make a histogram for the top-k websites
plt.figure(figsize = (10, 5)) # create new figure
bar_width = 0.5
xpos = np.arange(len(top_websites)) + bar_width # bar positions on the x-axis
plt.bar(xpos, [w[1] for w in top_websites], bar_width) # places the bars
plt.xticks(xpos+bar_width/2, [w[0] for w in top_websites]) # ticks on x-axis
## For each of the top-k websites, show the most popular
## addresses (base urls)
m = 3 # Show at most m addresses...
pct = 0.90 # ... or stop at 90% of visits
# For each of the top-k websites...
for website_name, website_visits in top_websites:
# ... store the most frequently visited base urls ...
website_addresses = sorted(addresses[website_name].items(),
key = lambda x: x[1], reverse = True)
top_addresses = []; visits = 0.
for address, address_count in website_addresses:
visits += address_count
top_addresses.append((address, address_count))
if len(top_addresses) > m or visits / website_visits >= pct:
break
# ... and the number of visits to addresses that are not among the top
other_num = website_visits - visits
# make a pie-chart for this website
# if it is associated with many addresses
if len(top_addresses) >= m: # TODO change this condition if you want
plt.figure(figsize = (7,7))
labels = [x[0] for x in top_addresses] + ["other"]
sizes = [x[1] for x in top_addresses] + [other_num]
# arbitrary choice of colors -- I like blue
n = float(len(sizes))
colors = [( p / (n + 1.), 0., (1. - p / (n + 1.)))\
for p in xrange(len(sizes))]
colors[-1] = 'grey'
plt.pie(sizes, labels=labels, colors=colors,
labeldistance = 1.1, autopct='%1.1f%%', startangle = 90)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Objectives
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from IPython.display import Image
Image(filename='../figures/workflow02.png')
import csv
with open('../database.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
for row in spamreader:
print ', '.join(row)
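# note: this cell uses Python 2 idioms ('rb' mode plus the print statement);
# on Python 3 use open(..., newline='') and print()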
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.__version__
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introducing TinyImageNet
Step2: TinyImageNet-100-A classes
Step3: Visualize Examples
Step4: Pretrained model
Step5: Pretrained model performance
Step7: Saliency Maps
Step8: Once you have completed the implementation in the cell above, run the following to visualize some class saliency maps on the validation set of TinyImageNet-100-A.
Step10: Fooling Images
Step11: Run the following to choose a random validation set image that is correctly classified by the network, and then make a fooling image.
|
<ASSISTANT_TASK:>
Python Code:
# As usual, a bit of setup
import time, os, json
import numpy as np
import skimage.io
import matplotlib.pyplot as plt
from skynet.neural_network.classifiers.pretrained_cnn import PretrainedCNN
from skynet.utils.data_utils import load_tiny_imagenet
from skynet.utils.image_utils import blur_image, deprocess_image
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
data = load_tiny_imagenet('../skynet/datasets/tiny-imagenet-100-A', subtract_mean=True)
for i, names in enumerate(data['class_names']):
print(i, ' '.join('"%s"' % name for name in names))
# Visualize some examples of the training data
classes_to_show = 7
examples_per_class = 5
class_idxs = np.random.choice(len(data['class_names']), size=classes_to_show, replace=False)
for i, class_idx in enumerate(class_idxs):
train_idxs, = np.nonzero(data['y_train'] == class_idx)
train_idxs = np.random.choice(train_idxs, size=examples_per_class, replace=False)
for j, train_idx in enumerate(train_idxs):
img = deprocess_image(data['X_train'][train_idx], data['mean_image'])
plt.subplot(examples_per_class, classes_to_show, 1 + i + classes_to_show * j)
if j == 0:
plt.title(data['class_names'][class_idx][0])
plt.imshow(img)
plt.gca().axis('off')
plt.show()
model = PretrainedCNN(h5_file='../skynet/datasets/pretrained_model.h5')
batch_size = 100
# Test the model on training data
mask = np.random.randint(data['X_train'].shape[0], size=batch_size)
X, y = data['X_train'][mask], data['y_train'][mask]
y_pred = model.loss(X).argmax(axis=1)
print('Training accuracy: ', (y_pred == y).mean())
# Test the model on validation data
mask = np.random.randint(data['X_val'].shape[0], size=batch_size)
X, y = data['X_val'][mask], data['y_val'][mask]
y_pred = model.loss(X).argmax(axis=1)
print('Validation accuracy: ', (y_pred == y).mean())
def compute_saliency_maps(X, y, model):
Compute a class saliency map using the model for images X and labels y.
Input:
- X: Input images, of shape (N, 3, H, W)
- y: Labels for X, of shape (N,)
- model: A PretrainedCNN that will be used to compute the saliency map.
Returns:
- saliency: An array of shape (N, H, W) giving the saliency maps for the input
images.
saliency = None
##############################################################################
# TODO: Implement this function. You should use the forward and backward #
# methods of the PretrainedCNN class, and compute gradients with respect to #
# the unnormalized class score of the ground-truth classes in y. #
##############################################################################
N, _, H, W = X.shape
saliency = np.zeros((N, H, W))
scores, cache = model.forward(X, mode='test')
dscores = np.zeros_like(scores)
dscores[np.arange(N), y] = 1
dX, grads = model.backward(dscores, cache)
# max along the channel dimension
saliency = np.max(np.absolute(dX), axis=1)
pass
##############################################################################
# END OF YOUR CODE #
##############################################################################
return saliency
def show_saliency_maps(mask):
mask = np.asarray(mask)
X = data['X_val'][mask]
y = data['y_val'][mask]
saliency = compute_saliency_maps(X, y, model)
for i in range(mask.size):
plt.subplot(2, mask.size, i + 1)
plt.imshow(deprocess_image(X[i], data['mean_image']))
plt.axis('off')
plt.title(data['class_names'][y[i]][0])
plt.subplot(2, mask.size, mask.size + i + 1)
plt.title(mask[i])
plt.imshow(saliency[i])
plt.axis('off')
plt.gcf().set_size_inches(10, 4)
plt.show()
# Show some random images
mask = np.random.randint(data['X_val'].shape[0], size=5)
show_saliency_maps(mask)
# These are some cherry-picked images that should give good results
show_saliency_maps([128, 3225, 2417, 1640, 4619])
def make_fooling_image(X, target_y, model):
Generate a fooling image that is close to X, but that the model classifies
as target_y.
Inputs:
- X: Input image, of shape (1, 3, 64, 64)
- target_y: An integer in the range [0, 100)
- model: A PretrainedCNN
Returns:
- X_fooling: An image that is close to X, but that is classifed as target_y
by the model.
X_fooling = X.copy()
##############################################################################
# TODO: Generate a fooling image X_fooling that the model will classify as #
# the class target_y. Use gradient ascent on the target class score, using #
# the model.forward method to compute scores and the model.backward method #
# to compute image gradients. #
# #
# HINT: For most examples, you should be able to generate a fooling image #
# in fewer than 100 iterations of gradient ascent. #
##############################################################################
N = X.shape[0]
lr = 500
reg = 2e-5
i = 0
while True:
i += 1
scores, cache = model.forward(X_fooling)
if np.argmax(scores, axis=-1) == np.array([target_y]):
print('Fooled image in %dth iteration' % (i))
break
if not i % 100:
print(('iteration %d iteration, y_pred: %s, target_y: %s' %
(i, np.argmax(scores, axis=-1), [target_y])))
dscores = np.zeros_like(scores)
dscores[np.arange(N), [target_y]] = 1
dX, grads = model.backward(dscores, cache)
X_fooling += lr * (dX - reg * (X_fooling - X))  # ascend the target score while penalizing drift away from X
pass
pass
##############################################################################
# END OF YOUR CODE #
##############################################################################
return X_fooling
# Find a correctly classified validation image
while True:
i = np.random.randint(data['X_val'].shape[0])
X = data['X_val'][i:i+1]
y = data['y_val'][i:i+1]
y_pred = model.loss(X)[0].argmax()
if y_pred == y: break
target_y = 67
X_fooling = make_fooling_image(X, target_y, model)
# Make sure that X_fooling is classified as y_target
scores = model.loss(X_fooling)
assert scores[0].argmax() == target_y, 'The network is not fooled!'
# Show original image, fooling image, and difference
plt.subplot(1, 3, 1)
plt.imshow(deprocess_image(X, data['mean_image']))
plt.axis('off')
plt.title(data['class_names'][y[0]][0])
plt.subplot(1, 3, 2)
plt.imshow(deprocess_image(X_fooling, data['mean_image'], renorm=True))
plt.title(data['class_names'][target_y][0])
plt.axis('off')
plt.subplot(1, 3, 3)
plt.title('Difference')
plt.imshow(deprocess_image(X - X_fooling, data['mean_image']))
plt.axis('off')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: User Provided Layer 1 Topology
Step2: Return Value
Step3: Print the first row of the returned Dataframe
Step4: Layer 3 Topology
Step5: Return Value
Step6: Print the first row of the returned Dataframe
|
<ASSISTANT_TASK:>
Python Code:
bf.set_network('generate_questions')
bf.set_snapshot('aristaevpn')
result = bf.q.userProvidedLayer1Edges().answer().frame()
result.head(5)
result.iloc[0]
bf.set_network('generate_questions')
bf.set_snapshot('generate_questions')
result = bf.q.layer3Edges().answer().frame()
result.head(5)
result.iloc[0]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read in the data
Step2: I had to modify the raw data to get it to read in conveniently. I try not to modify raw data formats (for reproducibility purposes) but there didn't seem to be a convenient way otherwise. The problem was the Vtype column is undefined for most of the file, so a fixed-width file appears to have no column there, which screws up the last few columns. I simply labeled the first three columns as "ajun" to make it look like there was something there. So I will just drop those columns.
Step3: These are the input stars. The absolute magnitude versus effective temperature.
Step4: This is sort-of a Malmquist bias plot. At a given distance, you can detect the brighter stars.
Step5: This is a typical proper motion scatter plot.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
%config InlineBackend.figure_format = "retina"
import pandas as pd
sns.set_context("talk")
names = ['byte_range', 'data_type', 'col_ID', 'desc']
fwf_cols = pd.read_fwf('../data/synthetic/gum_mw_columns.tsv',names=names)
fwf_cols.head()
col_names = fwf_cols.col_ID.values
gum_mw_alt = pd.read_fwf('../data/synthetic/gum_mw.sam', names=col_names)
gum_mw_alt.drop(["Vamp", "Vper", "Vphase", "Vtype"], inplace=True, axis=1)
gum_mw_alt.head()
col_names
gum = gum_mw_alt
plt.figure(figsize=[5, 8])
#plt.plot(gum['V-I'], gum.Mbol, '.')
plt.plot(gum.Teff, gum.Mbol, '.')
plt.xlim(10000, 2000)
plt.ylim(20, -5)
plt.xlabel("$T_{\mathrm{eff}}$")
plt.ylabel("$M_{\mathrm{bol}}$");
plt.figure(figsize=[8, 8])
#plt.plot(gum['V-I'], gum.Mbol, '.')
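# At a fixed apparent-magnitude limit, m = M + 5*log10(d / 10 pc) implies that
# only intrinsically brighter stars remain detectable at larger distances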
sc = plt.scatter(gum.r/1000.0, gum.Gmag, c=gum.Teff, s=20, marker='o', vmin=2000, vmax=10000, cmap="Spectral")
plt.xlabel("$d$ (kpc)")
plt.ylabel("$G$")
plt.hlines(12, 0, 10, colors = 'b', linestyles='--')
plt.colorbar(sc)
plt.ylim(20, 5)
plt.xlim(0, 10)
plt.figure(figsize=[8, 8])
plt.plot(gum.pmRA, gum.pmDE, '.', alpha=0.2)
plt.xlabel("$\delta_{\mathrm{RA}}$ (mas/yr)")
plt.ylabel("$\delta_{\mathrm{DEC}}$ (mas/yr)")
plt.figure(figsize=[8, 8])
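# Total proper motion: quadrature sum of the RA and Dec components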
pm = np.sqrt(gum.pmRA**2 + gum.pmDE**2)
plt.plot(gum.r/1000.0, pm, '.', alpha=0.5)
plt.xlabel("$d$ (kpc)")
plt.ylabel("$\delta$ (mas/yr)")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Let's first define a function which will create a dataset to train on. We want to be able to test a range of datasets, from very small to very large, to see which implementation is faster. We also want a function which will take in the models and evaluate them. Let's define both of those now.
Step4: Let's look first at single-dimension Gaussian datasets. We'll look at how many times faster pomegranate is, which means that values > 1 show pomegranate is faster and < 1 show pomegranate is slower. Let's also look at the accuracy of both algorithms. They should have the same accuracy since they implement the same algorithm.
Step5: It looks as if pomegranate is approximately the same speed for training small models, but the prediction time can be a lot faster in pomegranate than in sklearn.
Step6: It looks like, again, pomegranate is around the same speed as sklearn for fitting models, but it is consistently much faster at making predictions.
Step7: This does show that pomegranate is faster at making predictions, but both are so fast that in practice it may not really matter.
Step8: It looks like pomegranate can be around twice as fast as sklearn at fitting multivariate Gaussian Naive Bayes models when there is more than one feature.
Step9: It looks like pomegranate is consistently faster than sklearn at fitting the model but converges to approximately the same prediction speed in the high-dimensional setting. Their accuracies remain identical, indicating that the two are learning the same model.
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import seaborn, time
seaborn.set_style('whitegrid')
from sklearn.naive_bayes import GaussianNB
from pomegranate import *
def create_dataset(n_samples, n_dim, n_classes):
    """Create a random dataset with n_samples in each class."""
X = numpy.concatenate([numpy.random.randn(n_samples, n_dim) + i for i in range(n_classes)])
y = numpy.concatenate([numpy.zeros(n_samples) + i for i in range(n_classes)])
return X, y
def plot(fit, predict, skl_error, pom_error, sizes, xlabel):
    """Plot the results."""
idx = numpy.arange(fit.shape[1])
plt.figure(figsize=(14, 4))
plt.plot(fit.mean(axis=0), c='c', label="Fitting")
plt.plot(predict.mean(axis=0), c='m', label="Prediction")
plt.plot([0, fit.shape[1]], [1, 1], c='k', label="Baseline")
plt.fill_between(idx, fit.min(axis=0), fit.max(axis=0), color='c', alpha=0.3)
plt.fill_between(idx, predict.min(axis=0), predict.max(axis=0), color='m', alpha=0.3)
plt.xticks(idx, sizes, rotation=65, fontsize=14)
plt.xlabel('{}'.format(xlabel), fontsize=14)
plt.ylabel('pomegranate is x times faster', fontsize=14)
plt.legend(fontsize=12, loc=4)
plt.show()
plt.figure(figsize=(14, 4))
plt.plot(1 - skl_error.mean(axis=0), alpha=0.5, c='c', label="sklearn accuracy")
plt.plot(1 - pom_error.mean(axis=0), alpha=0.5, c='m', label="pomegranate accuracy")
plt.fill_between(idx, 1-skl_error.min(axis=0), 1-skl_error.max(axis=0), color='c', alpha=0.3)
plt.fill_between(idx, 1-pom_error.min(axis=0), 1-pom_error.max(axis=0), color='m', alpha=0.3)
plt.xticks(idx, sizes, rotation=65, fontsize=14)
plt.xlabel('{}'.format(xlabel), fontsize=14)
plt.ylabel('Accuracy', fontsize=14)
plt.legend(fontsize=14)
plt.show()
sizes = numpy.around(numpy.exp(numpy.arange(8, 16))).astype('int')
n, m = sizes.shape[0], 20
skl_predict, pom_predict = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_fit, pom_fit = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_error, pom_error = numpy.zeros((m, n)), numpy.zeros((m, n))
for i in range(m):
for j, size in enumerate(sizes):
X, y = create_dataset(size, 1, 2)
# bench fit times
tic = time.time()
skl = GaussianNB()
skl.fit(X, y)
skl_fit[i, j] = time.time() - tic
tic = time.time()
pom = NaiveBayes.from_samples(NormalDistribution, X, y)
pom_fit[i, j] = time.time() - tic
# bench predict times
tic = time.time()
skl_predictions = skl.predict(X)
skl_predict[i, j] = time.time() - tic
tic = time.time()
pom_predictions = pom.predict(X)
pom_predict[i, j] = time.time() - tic
# check number wrong
skl_e = (y != skl_predictions).mean()
pom_e = (y != pom_predictions).mean()
skl_error[i, j] = min(skl_e, 1-skl_e)
pom_error[i, j] = min(pom_e, 1-pom_e)
fit = skl_fit / pom_fit
predict = skl_predict / pom_predict
plot(fit, predict, skl_error, pom_error, sizes, "samples per component")
sizes = numpy.arange(2, 21).astype('int')
n, m = sizes.shape[0], 20
skl_predict, pom_predict = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_fit, pom_fit = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_error, pom_error = numpy.zeros((m, n)), numpy.zeros((m, n))
for i in range(m):
for j, size in enumerate(sizes):
X, y = create_dataset(50000 // size, 1, size)
# bench fit times
tic = time.time()
skl = GaussianNB()
skl.fit(X, y)
skl_fit[i, j] = time.time() - tic
tic = time.time()
pom = NaiveBayes.from_samples(NormalDistribution, X, y)
pom_fit[i, j] = time.time() - tic
# bench predict times
tic = time.time()
skl_predictions = skl.predict(X)
skl_predict[i, j] = time.time() - tic
tic = time.time()
pom_predictions = pom.predict(X)
pom_predict[i, j] = time.time() - tic
# check number wrong
skl_e = (y != skl_predictions).mean()
pom_e = (y != pom_predictions).mean()
skl_error[i, j] = min(skl_e, 1-skl_e)
pom_error[i, j] = min(pom_e, 1-pom_e)
fit = skl_fit / pom_fit
predict = skl_predict / pom_predict
plot(fit, predict, skl_error, pom_error, sizes, "number of classes")
X, y = create_dataset(50000, 1, 2)
skl = GaussianNB()
skl.fit(X, y)
pom = NaiveBayes.from_samples(NormalDistribution, X, y)
%timeit skl.predict(X)
%timeit pom.predict(X)
sizes = numpy.around(numpy.exp(numpy.arange(8, 16))).astype('int')
n, m = sizes.shape[0], 20
skl_predict, pom_predict = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_fit, pom_fit = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_error, pom_error = numpy.zeros((m, n)), numpy.zeros((m, n))
for i in range(m):
for j, size in enumerate(sizes):
X, y = create_dataset(size, 5, 2)
# bench fit times
tic = time.time()
skl = GaussianNB()
skl.fit(X, y)
skl_fit[i, j] = time.time() - tic
tic = time.time()
pom = NaiveBayes.from_samples(NormalDistribution, X, y)
pom_fit[i, j] = time.time() - tic
# bench predict times
tic = time.time()
skl_predictions = skl.predict(X)
skl_predict[i, j] = time.time() - tic
tic = time.time()
pom_predictions = pom.predict(X)
pom_predict[i, j] = time.time() - tic
# check number wrong
skl_e = (y != skl_predictions).mean()
pom_e = (y != pom_predictions).mean()
skl_error[i, j] = min(skl_e, 1-skl_e)
pom_error[i, j] = min(pom_e, 1-pom_e)
fit = skl_fit / pom_fit
predict = skl_predict / pom_predict
plot(fit, predict, skl_error, pom_error, sizes, "samples per component")
sizes = numpy.arange(5, 101, 5).astype('int')
n, m = sizes.shape[0], 20
skl_predict, pom_predict = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_fit, pom_fit = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_error, pom_error = numpy.zeros((m, n)), numpy.zeros((m, n))
for i in range(m):
for j, size in enumerate(sizes):
X, y = create_dataset(50000, size, 2)
# bench fit times
tic = time.time()
skl = GaussianNB()
skl.fit(X, y)
skl_fit[i, j] = time.time() - tic
tic = time.time()
pom = NaiveBayes.from_samples(NormalDistribution, X, y)
pom_fit[i, j] = time.time() - tic
# bench predict times
tic = time.time()
skl_predictions = skl.predict(X)
skl_predict[i, j] = time.time() - tic
tic = time.time()
pom_predictions = pom.predict(X)
pom_predict[i, j] = time.time() - tic
# check number wrong
skl_e = (y != skl_predictions).mean()
pom_e = (y != pom_predictions).mean()
skl_error[i, j] = min(skl_e, 1-skl_e)
pom_error[i, j] = min(pom_e, 1-pom_e)
fit = skl_fit / pom_fit
predict = skl_predict / pom_predict
plot(fit, predict, skl_error, pom_error, sizes, "dimensions")
sizes = numpy.around( numpy.exp( numpy.arange(8, 16) ) ).astype('int')
n, m = sizes.shape[0], 20
skl_time, pom_time = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_error, pom_error = numpy.zeros((m, n)), numpy.zeros((m, n))
for i in range(m):
for j, size in enumerate(sizes):
skl = GaussianNB()
pom = NaiveBayes([IndependentComponentsDistribution([NormalDistribution(0, 1) for i in range(5)]),
IndependentComponentsDistribution([NormalDistribution(0, 1) for i in range(5)])])
for l in range(5):
X, y = create_dataset(size, 5, 2)
tic = time.time()
skl.partial_fit(X, y, classes=[0, 1])
skl_time[i, j] += time.time() - tic
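            # pomegranate's out-of-core API: summarize() accumulates sufficient
            # statistics per batch; from_summaries() then updates the parameters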
tic = time.time()
pom.summarize( X, y )
pom_time[i, j] += time.time() - tic
tic = time.time()
pom.from_summaries()
pom_time[i, j] += time.time() - tic
skl_predictions = skl.predict( X )
pom_predictions = pom.predict( X )
skl_error[i, j] = ( y != skl_predictions ).mean()
pom_error[i, j] = ( y != pom_predictions ).mean()
fit = skl_time / pom_time
idx = numpy.arange(fit.shape[1])
plt.figure( figsize=(14, 4))
plt.plot( fit.mean(axis=0), c='c', label="Fitting")
plt.plot( [0, fit.shape[1]], [1, 1], c='k', label="Baseline" )
plt.fill_between( idx, fit.min(axis=0), fit.max(axis=0), color='c', alpha=0.3 )
plt.xticks(idx, sizes, rotation=65, fontsize=14)
plt.xlabel('Batch Size', fontsize=14)
plt.ylabel('pomegranate is x times faster', fontsize=14)
plt.legend(fontsize=12, loc=4)
plt.show()
plt.figure( figsize=(14, 4))
plt.plot( 1 - skl_error.mean(axis=0), alpha=0.5, c='c', label="sklearn accuracy" )
plt.plot( 1 - pom_error.mean(axis=0), alpha=0.5, c='m', label="pomegranate accuracy" )
plt.fill_between( idx, 1-skl_error.min(axis=0), 1-skl_error.max(axis=0), color='c', alpha=0.3 )
plt.fill_between( idx, 1-pom_error.min(axis=0), 1-pom_error.max(axis=0), color='m', alpha=0.3 )
plt.xticks( idx, sizes, rotation=65, fontsize=14)
plt.xlabel('Batch Size', fontsize=14)
plt.ylabel('Accuracy', fontsize=14)
plt.legend(fontsize=14)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The model_to_pymatbridge function will send the model to the workspace with the given variable name.
Step2: Now in the MATLAB workspace, the variable name 'model' holds a COBRA toolbox struct encoding the model.
Step3: First, we have to initialize the COBRA toolbox in MATLAB.
Step4: Commands from the COBRA toolbox can now be run on the model
Step5: FBA in the COBRA toolbox should give the same result as cobrapy (but maybe just a little bit slower)
|
<ASSISTANT_TASK:>
Python Code:
%load_ext pymatbridge
import cobra.test
m = cobra.test.create_test_model("textbook")
from cobra.io.mat import model_to_pymatbridge
model_to_pymatbridge(m, variable_name="model")
%%matlab
model
%%matlab --silent
warning('off'); % this works around a pymatbridge bug
addpath(genpath('~/cobratoolbox/'));
initCobraToolbox();
%%matlab
optimizeCbModel(model)
%time
m.optimize().f
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Format Data
Step2: Linear Regression
|
<ASSISTANT_TASK:>
Python Code:
from os import sys, path
sys.path.append(path.abspath('../src/regression'))
import linear_regression
from linear_regression import *
%matplotlib inline
# We use the london market to get the stock values of gold and silver
gold = quandl.get("LBMA/GOLD", returns="numpy", start_date="2015-01-01")
silver = quandl.get("LBMA/SILVER", returns="numpy", start_date="2015-01-01")
copper = quandl.get("CHRIS/CME_SI3", returns="numpy", start_date="2015-01-01")
# Retrieve gold and silver values in $ by day
XY_gold = stock_arr_to_XY(gold)
XY_silver = stock_arr_to_XY(silver)
XY_copper = stock_arr_to_XY(copper)
# Filter arrays such that gold and silver shares the same Xs
XY_gold, XY_silver = filter_on_same_X(XY_gold, XY_silver)
XY_gold, XY_copper = filter_on_same_X(XY_gold, XY_copper)
XY_copper, XY_silver = filter_on_same_X(XY_copper, XY_silver)
x_gold, y_gold = XY_gold
x_silver, y_silver = XY_silver
x_copper, y_copper = XY_copper
# Plot the data
plot_data(XY_silver, XY_gold, 'silver', 'gold')
plot_data(XY_copper, XY_gold, 'copper', 'gold')
# Rename y_silver to X and y_gold to Y
X, Y = [np.array(y_silver), ], np.array(y_gold)
# Initilize the parameters
Ws = [0.5, 0.5]
alphas = (0.0001, 0.01)
# Load Trainer
t = Trainer(X, Y, Ws, alphas)
# Define Prediction and Loss
t.pred = lambda X : np.multiply(X[0], t.Ws[0]) + t.Ws[1]
t.loss = lambda : (np.power((t.Y - t.pred(X)), 2) * 1 / 2.).mean()
# Define the gradient functions
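# Chain rule for the squared loss: dL/dw0 = mean(-(Y - pred) * X),
# dL/dw1 = mean(-(Y - pred))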
dl_dp = lambda : -(t.Y - t.pred(X))
dl_dw0 = lambda : np.multiply(dl_dp(), X[0]).mean()
dl_dw1 = lambda : dl_dp().mean()
t.dWs = (dl_dw0, dl_dw1)
# Start training
anim = t.animated_train(is_notebook=True)
# Show it
from IPython.display import HTML
HTML(anim.to_html5_video())
print "Final Loss is %f" % t.loss()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the PDF in PDFPlumber
Step2: Let's look at the first 15 characters on the first page of the PDF
Step3: Extract the precinct ID
Step4: We can do the same for the number of ballots cast
Step5: ... and for the number of registered voters in each precinct
Step6: Getting the results for each race is a bit trickier
Step7: Let's restructure that slightly, so that each row contains information about the relevant race
Step8: From there, we can start to do some calculations
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import pdfplumber
import re
pdf = pdfplumber.open("2014-bulletin-first-10-pages.pdf")
print(len(pdf.pages))
first_page = pdf.pages[0]
chars = pd.DataFrame(first_page.chars)
chars.head(15)
pd.DataFrame(first_page.crop((0, 37, first_page.width / 2, 44 )).chars)
def get_precinct_id(page):
cropped = page.crop((0, 37, page.width / 2, 44 ))
text = "".join((c["text"] for c in cropped.chars))
trimmed = re.sub(r" +", "|", text)
return trimmed
for page in pdf.pages:
print(get_precinct_id(page))
def get_ballots_cast(page):
cropped = page.crop((0, 48, page.width / 3, 60))
text = "".join((c["text"] for c in cropped.chars))
count = int(text.split(" ")[0])
return count
for page in pdf.pages:
print(get_ballots_cast(page))
def get_registered_voters(page):
cropped = page.crop((0, 62, page.width / 3, 74))
text = "".join((c["text"] for c in cropped.chars))
count = int(text.split(" ")[0])
return count
for page in pdf.pages:
print(get_registered_voters(page))
def get_results_rows(page):
first_col = page.crop((0, 77, 212, page.height))
table = first_col.extract_table(
v=(0, 158, 180, 212),
h="gutters",
x_tolerance=1)
return table
get_results_rows(first_page)
def get_results_table(page):
rows = get_results_rows(page)
results = []
race = None
for row in rows:
name, affil, votes = row
if name == "VOTER NOMINATED": continue
if votes == None:
race = name
else:
results.append((race, name, affil, int(votes)))
results_df = pd.DataFrame(results, columns=[ "race", "name", "party", "votes" ])
return results_df
get_results_table(first_page)
def get_jerry_brown_pct(page):
table = get_results_table(page)
brown_votes = table[table["name"] == "EDMUND G BROWN"]["votes"].iloc[0]
kashkari_votes = table[table["name"] == "NEEL KASHKARI"]["votes"].iloc[0]
brown_prop = float(brown_votes) / (kashkari_votes + brown_votes)
return (100 * brown_prop).round(1)
for page in pdf.pages:
precinct_id = get_precinct_id(page)
brown = get_jerry_brown_pct(page)
print("{0}: {1}%".format(precinct_id, brown))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Application Context
Step2: Global Database Replication Group
Step3: Key Synchronization
Step4: Custom Domain Names
Step5: East
Step6: West
Step7: Route 53 Set Up
Step8: Health Check - East
Step9: Health Check - West
Step10: Route 53 CNames and Routing Policy
|
<ASSISTANT_TASK:>
Python Code:
# SDK Imports
import boto3
cformation_east = boto3.client('cloudformation', region_name='us-east-1')
cformation_west = boto3.client('cloudformation', region_name='us-west-2')
gw_east = boto3.client('apigateway', region_name='us-east-1')
gw_west = boto3.client('apigateway', region_name='us-west-2')
def get_stack_name(service, stage):
return '{}-{}'.format(service,stage)
def get_endpoint(cf_client, stack_name):
response = cf_client.describe_stacks(
StackName=stack_name
)
outputs = response['Stacks'][0]['Outputs']
endpoint = [d for d in outputs if d['OutputKey'] == 'ServiceEndpoint'][0]['OutputValue']
return endpoint
def get_plan_and_api_ids(gw_client, service, stage):
response = gw_client.get_usage_plans()
plans = response['items']
stack_name = get_stack_name(service, stage)
plan = [d for d in plans if d['name'] == stack_name][0]
plan_id = plan['id']
api_stage = [d for d in plan['apiStages'] if d['stage'] == stage][0]
api_id = api_stage['apiId']
return plan_id, api_id
import uuid
def generate_api_key():
return str(uuid.uuid4())
def create_api_key_and_add_to_plan(gw_client, key_name, key_val, plan_id):
create_key_response = gw_client.create_api_key(
name=key_name,
enabled=True,
generateDistinctId=True,
value=key_val
)
key_id = create_key_response['id']
plan_key_response = gw_client.create_usage_plan_key(
usagePlanId=plan_id,
keyId=key_id,
keyType='API_KEY'
)
    return key_val, key_id
def form_s3_url_prefix(region):
prefix = ''
if region == 'us-east-1':
prefix = 'https://s3.amazonaws.com'
else:
prefix = 'https://s3-' + region + '.amazonaws.com'
return prefix
# Create a key and add it to the usage plan?
# - create_api_key - need key id output
# - you can get the usage plan id and the api id via get_usage_plans and matching the plan with the same name
# as the stack
# - create_usage_plan_key associates the key to the plan: inputs are plan id, key id
service = 'serverless-rest-api-with-dynamodb'
stage = 'dev'
cross_region_key_name = 'xregion_key'
bucket_name = 'xtds-cf-templates'
primary_region = 'us-east-1'
stack_name = get_stack_name(service, stage)
east_endpoint = get_endpoint(cformation_east, stack_name)
print east_endpoint
west_endpoint = get_endpoint(cformation_west, stack_name)
print west_endpoint
table_name = service + '-' + stage
print table_name
ddb_client = boto3.client('dynamodb')
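# Both regional tables (created by the two stack deployments) must already
# exist with the same name before they can be joined into a global table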
response = ddb_client.create_global_table(
GlobalTableName=table_name,
ReplicationGroup=[
{
'RegionName': 'us-east-1'
},
{
'RegionName': 'us-west-2'
},
]
)
print response
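# Share a single key value across both regions so one client API key works
# against either regional endpoint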
key_val = generate_api_key()
print key_val
# Create east key and add to plan
plan_id_east, api_id_east = get_plan_and_api_ids(gw_east, service, stage)
key_val_east, key_id_east = create_api_key_and_add_to_plan(gw_east, cross_region_key_name, key_val, plan_id_east)
plan_id_west, api_id_west = get_plan_and_api_ids(gw_west, service, stage)
key_val_west, key_id_west = create_api_key_and_add_to_plan(gw_west, cross_region_key_name, key_val, plan_id_west)
domain_name = 'superapi.elcaro.net'
# Custom domains hang around even when the APIs the are associated with are deleted. In
# this cell we figure out if the following cells need to be executed.
regional_domain_name = ''
response = gw_east.get_domain_names()
items = response['items']
items = [x for x in items if x['domainName'] == domain_name]
if len(items) == 1:
regional_domain_name = items[0]['regionalDomainName']
print 'Custom domain name for API exists with regional domain name {}'.format(regional_domain_name)
print '===> Skip the rest of the cells in this section'
else:
print '===> Custom domain does not exist - continue executing the cells in this section of the notebook'
# We need to select the certificate associated with out domain name
acm_client = boto3.client('acm')
response = acm_client.list_certificates()
summaryList = response['CertificateSummaryList']
print summaryList
domain_cert = [x for x in summaryList if x['DomainName'] == domain_name][0]
print domain_cert
cert_arn = domain_cert['CertificateArn']
print cert_arn
# Create the domain name
response = gw_east.create_domain_name(
domainName=domain_name,
regionalCertificateArn=cert_arn,
endpointConfiguration={
'types': [
'REGIONAL'
]
}
)
print response
regional_domain_name = response['regionalDomainName']
print regional_domain_name
# Get the rest api id
response = gw_east.get_rest_apis()
print response
items = response['items']
item = [x for x in items if x['name'] == stage + '-' + service][0]
print item
rest_api_id = item['id']
print rest_api_id
# Create custom domain mapping for our stage - here we subsume the stage into the mapping
response = gw_east.create_base_path_mapping(
domainName=domain_name,
basePath='',
restApiId=rest_api_id,
stage=stage
)
print response
# East health check endpoint
east_hc_cname = rest_api_id + '.execute-api.us-east-1.amazonaws.com'
print east_hc_cname
# Custom domains hang around even when the APIs they are associated with are deleted. In
# this cell we figure out if the following cells need to be executed.
west_domain_name = ''
response = gw_west.get_domain_names()
items = response['items']
items = [x for x in items if x['domainName'] == domain_name]
if len(items) == 1:
west_domain_name = items[0]['regionalDomainName']
print 'Custom domain name for API exists with regional domain name {}'.format(west_domain_name)
print '===> Skip the rest of the cells in this section'
else:
print '===> Custom domain does not exist - continue executing the cells in this section of the notebook'
acm_west = boto3.client('acm', region_name='us-west-2')
response = acm_west.list_certificates()
summaryList = response['CertificateSummaryList']
print summaryList
domain_cert = [x for x in summaryList if x['DomainName'] == domain_name][0]
print domain_cert
cert_arn = domain_cert['CertificateArn']
print cert_arn
# Create the domain name
response = gw_west.create_domain_name(
domainName=domain_name,
regionalCertificateArn=cert_arn,
endpointConfiguration={
'types': [
'REGIONAL'
]
}
)
print response
west_domain_name = response['regionalDomainName']
print west_domain_name
# Get the rest api id
response = gw_west.get_rest_apis()
print response
items = response['items']
item = [x for x in items if x['name'] == stage + '-' + service][0]
print item
rest_api_id = item['id']
print rest_api_id
# Create custom domain mapping for our stage - here we subsume the stage into the mapping
response = gw_west.create_base_path_mapping(
domainName=domain_name,
basePath='',
restApiId=rest_api_id,
stage=stage
)
print response
# West health check endpoint
west_hc_cname = rest_api_id + '.execute-api.us-west-2.amazonaws.com'
print west_hc_cname
r53_client = boto3.client('route53')
caller_ref = generate_api_key() # Note this generates a uuid string that can be used as a key
print caller_ref
print regional_domain_name
# East health check
response = r53_client.create_health_check(
CallerReference=caller_ref,
HealthCheckConfig={
'Type':'HTTPS',
'ResourcePath':'/' + stage + '/todos/health',
'FullyQualifiedDomainName':east_hc_cname
}
)
print response
hc_id = response['HealthCheck']['Id']
print 'health check id: {}'.format(hc_id)
# Now tag the health check name
tag_resp = r53_client.change_tags_for_resource(
ResourceType='healthcheck',
ResourceId=hc_id,
AddTags=[
{
'Key':'Name',
'Value':'east-api-hc'
},
]
)
print tag_resp
hc_resp = r53_client.get_health_check_status(
HealthCheckId=hc_id
)
print hc_resp
caller_ref = generate_api_key() # Note this generates a uuid string that can be used as a key
print caller_ref
print west_domain_name
# West health check
response = r53_client.create_health_check(
CallerReference=caller_ref,
HealthCheckConfig={
'Type':'HTTPS',
'ResourcePath': '/' + stage + '/todos/health',
'FullyQualifiedDomainName':west_hc_cname
}
)
print response
hc_id = response['HealthCheck']['Id']
print 'health check id: {}'.format(hc_id)
# Now tag the health check name
tag_resp = r53_client.change_tags_for_resource(
ResourceType='healthcheck',
ResourceId=hc_id,
AddTags=[
{
'Key':'Name',
'Value':'west-api-hc'
},
]
)
print tag_resp
hosted_zone = 'elcaro.net.'
response = r53_client.list_hosted_zones()
zones = response['HostedZones']
zones = [x for x in zones if x['Name'] == hosted_zone]
hosted_zone_id = zones[0]['Id']
print hosted_zone_id
# Um, grab the health check ids again - we'll fix this later
response = r53_client.list_health_checks()
health_checks = response['HealthChecks']
print health_checks
east_check = [x for x in health_checks if x['HealthCheckConfig']['FullyQualifiedDomainName'] == east_hc_cname][0]['Id']
print east_check
west_check = [x for x in health_checks if x['HealthCheckConfig']['FullyQualifiedDomainName'] == west_hc_cname][0]['Id']
print west_check
response = r53_client.list_resource_record_sets(
HostedZoneId=hosted_zone_id
)
print response
response = r53_client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch={
'Changes': [
{
'Action': 'CREATE',
'ResourceRecordSet': {
'Name': domain_name + '.',
'Type': 'CNAME',
'SetIdentifier': 'east',
'Weight': 50,
'TTL': 30,
'ResourceRecords': [
{
'Value': regional_domain_name
},
],
'HealthCheckId': east_check
}
},
{
'Action': 'CREATE',
'ResourceRecordSet': {
'Name': domain_name + '.',
'Type': 'CNAME',
'SetIdentifier': 'west',
'Weight': 50,
'TTL': 30,
'ResourceRecords': [
{
'Value': west_domain_name
},
],
'HealthCheckId': west_check
}
}
]
}
)
print response
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot the phantom data, lowpassed to get rid of high-frequency artifacts.
Step2: Now we can clean the data with OTP, lowpass, and plot. The flux jumps have been suppressed alongside the random sensor noise.
Step3: We can also look at the effect on single-trial phantom localization.
|
<ASSISTANT_TASK:>
Python Code:
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
import os.path as op
import mne
import numpy as np
from mne import find_events, fit_dipole
from mne.datasets.brainstorm import bst_phantom_elekta
from mne.io import read_raw_fif
print(__doc__)
dipole_number = 1
data_path = bst_phantom_elekta.data_path()
raw = read_raw_fif(
op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif'))
raw.crop(40., 50.).load_data()
order = list(range(160, 170))
raw.copy().filter(0., 40.).plot(order=order, n_channels=10)
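# OTP reconstructs each channel from the temporal subspace spanned by the
# other channels, suppressing noise that is unique to a single sensor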
raw_clean = mne.preprocessing.oversampled_temporal_projection(raw)
raw_clean.filter(0., 40.)
raw_clean.plot(order=order, n_channels=10)
def compute_bias(raw):
events = find_events(raw, 'STI201', verbose=False)
events = events[1:] # first one has an artifact
tmin, tmax = -0.2, 0.1
epochs = mne.Epochs(raw, events, dipole_number, tmin, tmax,
baseline=(None, -0.01), preload=True, verbose=False)
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None,
verbose=False)
cov = mne.compute_covariance(epochs, tmax=0, method='oas',
rank=None, verbose=False)
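    # Fit a dipole at a single time point, 36 ms after stimulus onset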
idx = epochs.time_as_index(0.036)[0]
data = epochs.get_data()[:, :, idx].T
evoked = mne.EvokedArray(data, epochs.info, tmin=0.)
dip = fit_dipole(evoked, cov, sphere, n_jobs=None, verbose=False)[0]
actual_pos = mne.dipole.get_phantom_dipoles()[0][dipole_number - 1]
misses = 1000 * np.linalg.norm(dip.pos - actual_pos, axis=-1)
return misses
bias = compute_bias(raw)
print('Raw bias: %0.1fmm (worst: %0.1fmm)'
% (np.mean(bias), np.max(bias)))
bias_clean = compute_bias(raw_clean)
print('OTP bias: %0.1fmm (worst: %0.1fmm)'
% (np.mean(bias_clean), np.max(bias_clean),))
<END_TASK>
|